author		Chuck Lever <chuck.lever@oracle.com>		2007-01-31 12:14:08 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2007-02-03 18:35:10 -0500
commit		46121cf7d85869bfe9588bac7ccf55aa0bc7f278 (patch)
tree		51172f1b47dd8aa99e6f96c495fe7f8e2c0bbc34 /net
parent		8885cb367f86ce02bed3bf18192d74a53ac3b81f (diff)
SUNRPC: fix print format for tk_pid
The tk_pid field is an unsigned short. The proper print format specifier for
that type is %5u, not %4d.
Also clean up some miscellaneous print formatting nits.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
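For readers unfamiliar with the format specifiers involved, here is a hedged userspace illustration of the change described above; printf stands in for the kernel's dprintk, and the variable and message text are made up. An unsigned short can hold values up to 65535, so %4d both applies a signed conversion to an unsigned field and leaves too narrow a column for five-digit task IDs, while %5u matches the type and keeps the log columns aligned.

#include <stdio.h>

int main(void)
{
	unsigned short tk_pid = 65535;	/* largest possible unsigned short task ID */

	/* old style: signed conversion, four columns; IDs above 9999 break the alignment */
	printf("RPC: %4d call_reserve\n", tk_pid);

	/* new style: unsigned conversion, five columns; covers the full tk_pid range */
	printf("RPC: %5u call_reserve\n", tk_pid);

	return 0;
}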
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/auth.c	25
-rw-r--r--	net/sunrpc/auth_unix.c	9
-rw-r--r--	net/sunrpc/cache.c	3
-rw-r--r--	net/sunrpc/clnt.c	129
-rw-r--r--	net/sunrpc/pmap_clnt.c	18
-rw-r--r--	net/sunrpc/sched.c	66
-rw-r--r--	net/sunrpc/stats.c	6
-rw-r--r--	net/sunrpc/svc.c	6
-rw-r--r--	net/sunrpc/xprt.c	57
-rw-r--r--	net/sunrpc/xprtsock.c	107
10 files changed, 228 insertions, 198 deletions
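Besides the specifier change, the clnt.c hunks below add a small dprint_status() helper so the RPC state-machine routines stop open-coding the same "function (status N)" message. The following is only a userspace sketch of that pattern, assuming a stub task structure and substituting printf and __func__ for the kernel's dprintk and __FUNCTION__; none of the stub names come from the patch itself.

#include <stdio.h>

struct rpc_task_stub {
	unsigned short tk_pid;	/* stand-in for the kernel's rpc_task.tk_pid */
	int tk_status;
};

/* One macro emits the shared "RPC: <pid> <function> (status <n>)" line,
 * mirroring the shape of the dprint_status() helper added in net/sunrpc/clnt.c. */
#define dprint_status(t) \
	printf("RPC: %5u %s (status %d)\n", (t)->tk_pid, \
	       __func__, (t)->tk_status)

static void call_reserve(struct rpc_task_stub *task)
{
	dprint_status(task);	/* replaces an open-coded per-function printout */
}

int main(void)
{
	struct rpc_task_stub task = { .tk_pid = 42, .tk_status = 0 };

	call_reserve(&task);
	return 0;
}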
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 993ff1a5d945..5752f294751f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -181,7 +181,7 @@ rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free) | |||
181 | struct rpc_cred *cred; | 181 | struct rpc_cred *cred; |
182 | int i; | 182 | int i; |
183 | 183 | ||
184 | dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth); | 184 | dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth); |
185 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { | 185 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { |
186 | hlist_for_each_safe(pos, next, &cache->hashtable[i]) { | 186 | hlist_for_each_safe(pos, next, &cache->hashtable[i]) { |
187 | cred = hlist_entry(pos, struct rpc_cred, cr_hash); | 187 | cred = hlist_entry(pos, struct rpc_cred, cr_hash); |
@@ -267,7 +267,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags) | |||
267 | }; | 267 | }; |
268 | struct rpc_cred *ret; | 268 | struct rpc_cred *ret; |
269 | 269 | ||
270 | dprintk("RPC: looking up %s cred\n", | 270 | dprintk("RPC: looking up %s cred\n", |
271 | auth->au_ops->au_name); | 271 | auth->au_ops->au_name); |
272 | get_group_info(acred.group_info); | 272 | get_group_info(acred.group_info); |
273 | ret = auth->au_ops->lookup_cred(auth, &acred, flags); | 273 | ret = auth->au_ops->lookup_cred(auth, &acred, flags); |
@@ -287,7 +287,7 @@ rpcauth_bindcred(struct rpc_task *task) | |||
287 | struct rpc_cred *ret; | 287 | struct rpc_cred *ret; |
288 | int flags = 0; | 288 | int flags = 0; |
289 | 289 | ||
290 | dprintk("RPC: %4d looking up %s cred\n", | 290 | dprintk("RPC: %5u looking up %s cred\n", |
291 | task->tk_pid, task->tk_auth->au_ops->au_name); | 291 | task->tk_pid, task->tk_auth->au_ops->au_name); |
292 | get_group_info(acred.group_info); | 292 | get_group_info(acred.group_info); |
293 | if (task->tk_flags & RPC_TASK_ROOTCREDS) | 293 | if (task->tk_flags & RPC_TASK_ROOTCREDS) |
@@ -304,8 +304,9 @@ rpcauth_bindcred(struct rpc_task *task) | |||
304 | void | 304 | void |
305 | rpcauth_holdcred(struct rpc_task *task) | 305 | rpcauth_holdcred(struct rpc_task *task) |
306 | { | 306 | { |
307 | dprintk("RPC: %4d holding %s cred %p\n", | 307 | dprintk("RPC: %5u holding %s cred %p\n", |
308 | task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); | 308 | task->tk_pid, task->tk_auth->au_ops->au_name, |
309 | task->tk_msg.rpc_cred); | ||
309 | if (task->tk_msg.rpc_cred) | 310 | if (task->tk_msg.rpc_cred) |
310 | get_rpccred(task->tk_msg.rpc_cred); | 311 | get_rpccred(task->tk_msg.rpc_cred); |
311 | } | 312 | } |
@@ -324,7 +325,7 @@ rpcauth_unbindcred(struct rpc_task *task) | |||
324 | { | 325 | { |
325 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 326 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
326 | 327 | ||
327 | dprintk("RPC: %4d releasing %s cred %p\n", | 328 | dprintk("RPC: %5u releasing %s cred %p\n", |
328 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 329 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); |
329 | 330 | ||
330 | put_rpccred(cred); | 331 | put_rpccred(cred); |
@@ -336,7 +337,7 @@ rpcauth_marshcred(struct rpc_task *task, __be32 *p) | |||
336 | { | 337 | { |
337 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 338 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
338 | 339 | ||
339 | dprintk("RPC: %4d marshaling %s cred %p\n", | 340 | dprintk("RPC: %5u marshaling %s cred %p\n", |
340 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 341 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); |
341 | 342 | ||
342 | return cred->cr_ops->crmarshal(task, p); | 343 | return cred->cr_ops->crmarshal(task, p); |
@@ -347,7 +348,7 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p) | |||
347 | { | 348 | { |
348 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 349 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
349 | 350 | ||
350 | dprintk("RPC: %4d validating %s cred %p\n", | 351 | dprintk("RPC: %5u validating %s cred %p\n", |
351 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 352 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); |
352 | 353 | ||
353 | return cred->cr_ops->crvalidate(task, p); | 354 | return cred->cr_ops->crvalidate(task, p); |
@@ -359,7 +360,7 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, | |||
359 | { | 360 | { |
360 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 361 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
361 | 362 | ||
362 | dprintk("RPC: %4d using %s cred %p to wrap rpc data\n", | 363 | dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", |
363 | task->tk_pid, cred->cr_ops->cr_name, cred); | 364 | task->tk_pid, cred->cr_ops->cr_name, cred); |
364 | if (cred->cr_ops->crwrap_req) | 365 | if (cred->cr_ops->crwrap_req) |
365 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); | 366 | return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); |
@@ -373,7 +374,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | |||
373 | { | 374 | { |
374 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 375 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
375 | 376 | ||
376 | dprintk("RPC: %4d using %s cred %p to unwrap rpc data\n", | 377 | dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", |
377 | task->tk_pid, cred->cr_ops->cr_name, cred); | 378 | task->tk_pid, cred->cr_ops->cr_name, cred); |
378 | if (cred->cr_ops->crunwrap_resp) | 379 | if (cred->cr_ops->crunwrap_resp) |
379 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, | 380 | return cred->cr_ops->crunwrap_resp(task, decode, rqstp, |
@@ -388,7 +389,7 @@ rpcauth_refreshcred(struct rpc_task *task) | |||
388 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 389 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
389 | int err; | 390 | int err; |
390 | 391 | ||
391 | dprintk("RPC: %4d refreshing %s cred %p\n", | 392 | dprintk("RPC: %5u refreshing %s cred %p\n", |
392 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); | 393 | task->tk_pid, task->tk_auth->au_ops->au_name, cred); |
393 | 394 | ||
394 | err = cred->cr_ops->crrefresh(task); | 395 | err = cred->cr_ops->crrefresh(task); |
@@ -400,7 +401,7 @@ rpcauth_refreshcred(struct rpc_task *task) | |||
400 | void | 401 | void |
401 | rpcauth_invalcred(struct rpc_task *task) | 402 | rpcauth_invalcred(struct rpc_task *task) |
402 | { | 403 | { |
403 | dprintk("RPC: %4d invalidating %s cred %p\n", | 404 | dprintk("RPC: %5u invalidating %s cred %p\n", |
404 | task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); | 405 | task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); |
405 | spin_lock(&rpc_credcache_lock); | 406 | spin_lock(&rpc_credcache_lock); |
406 | if (task->tk_msg.rpc_cred) | 407 | if (task->tk_msg.rpc_cred) |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index f7f990c9afe2..4e7733aee36e 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -39,7 +39,8 @@ static struct rpc_credops unix_credops; | |||
39 | static struct rpc_auth * | 39 | static struct rpc_auth * |
40 | unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | 40 | unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) |
41 | { | 41 | { |
42 | dprintk("RPC: creating UNIX authenticator for client %p\n", clnt); | 42 | dprintk("RPC: creating UNIX authenticator for client %p\n", |
43 | clnt); | ||
43 | if (atomic_inc_return(&unix_auth.au_count) == 0) | 44 | if (atomic_inc_return(&unix_auth.au_count) == 0) |
44 | unix_cred_cache.nextgc = jiffies + (unix_cred_cache.expire >> 1); | 45 | unix_cred_cache.nextgc = jiffies + (unix_cred_cache.expire >> 1); |
45 | return &unix_auth; | 46 | return &unix_auth; |
@@ -48,7 +49,7 @@ unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | |||
48 | static void | 49 | static void |
49 | unx_destroy(struct rpc_auth *auth) | 50 | unx_destroy(struct rpc_auth *auth) |
50 | { | 51 | { |
51 | dprintk("RPC: destroying UNIX authenticator %p\n", auth); | 52 | dprintk("RPC: destroying UNIX authenticator %p\n", auth); |
52 | rpcauth_free_credcache(auth); | 53 | rpcauth_free_credcache(auth); |
53 | } | 54 | } |
54 | 55 | ||
@@ -67,8 +68,8 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
67 | struct unx_cred *cred; | 68 | struct unx_cred *cred; |
68 | int i; | 69 | int i; |
69 | 70 | ||
70 | dprintk("RPC: allocating UNIX cred for uid %d gid %d\n", | 71 | dprintk("RPC: allocating UNIX cred for uid %d gid %d\n", |
71 | acred->uid, acred->gid); | 72 | acred->uid, acred->gid); |
72 | 73 | ||
73 | if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) | 74 | if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) |
74 | return ERR_PTR(-ENOMEM); | 75 | return ERR_PTR(-ENOMEM); |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 14274490f92e..9e72223487fa 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -215,7 +215,8 @@ int cache_check(struct cache_detail *detail, | |||
215 | if (rv == -EAGAIN) | 215 | if (rv == -EAGAIN) |
216 | rv = -ENOENT; | 216 | rv = -ENOENT; |
217 | } else if (rv == -EAGAIN || age > refresh_age/2) { | 217 | } else if (rv == -EAGAIN || age > refresh_age/2) { |
218 | dprintk("Want update, refage=%ld, age=%ld\n", refresh_age, age); | 218 | dprintk("RPC: Want update, refage=%ld, age=%ld\n", |
219 | refresh_age, age); | ||
219 | if (!test_and_set_bit(CACHE_PENDING, &h->flags)) { | 220 | if (!test_and_set_bit(CACHE_PENDING, &h->flags)) { |
220 | switch (cache_make_upcall(detail, h)) { | 221 | switch (cache_make_upcall(detail, h)) { |
221 | case -EINVAL: | 222 | case -EINVAL: |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index e9d5f3c562e5..393e70aee181 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -42,6 +42,10 @@ | |||
42 | # define RPCDBG_FACILITY RPCDBG_CALL | 42 | # define RPCDBG_FACILITY RPCDBG_CALL |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | #define dprint_status(t) \ | ||
46 | dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ | ||
47 | __FUNCTION__, t->tk_status) | ||
48 | |||
45 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); | 49 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); |
46 | 50 | ||
47 | 51 | ||
@@ -106,8 +110,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s | |||
106 | int err; | 110 | int err; |
107 | int len; | 111 | int len; |
108 | 112 | ||
109 | dprintk("RPC: creating %s client for %s (xprt %p)\n", | 113 | dprintk("RPC: creating %s client for %s (xprt %p)\n", |
110 | program->name, servname, xprt); | 114 | program->name, servname, xprt); |
111 | 115 | ||
112 | err = -EINVAL; | 116 | err = -EINVAL; |
113 | if (!xprt) | 117 | if (!xprt) |
@@ -220,7 +224,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
220 | xprt->resvport = 0; | 224 | xprt->resvport = 0; |
221 | 225 | ||
222 | dprintk("RPC: creating %s client for %s (xprt %p)\n", | 226 | dprintk("RPC: creating %s client for %s (xprt %p)\n", |
223 | args->program->name, args->servername, xprt); | 227 | args->program->name, args->servername, xprt); |
224 | 228 | ||
225 | clnt = rpc_new_client(xprt, args->servername, args->program, | 229 | clnt = rpc_new_client(xprt, args->servername, args->program, |
226 | args->version, args->authflavor); | 230 | args->version, args->authflavor); |
@@ -288,7 +292,7 @@ out_no_path: | |||
288 | out_no_stats: | 292 | out_no_stats: |
289 | kfree(new); | 293 | kfree(new); |
290 | out_no_clnt: | 294 | out_no_clnt: |
291 | dprintk("RPC: %s returned error %d\n", __FUNCTION__, err); | 295 | dprintk("RPC: %s: returned error %d\n", __FUNCTION__, err); |
292 | return ERR_PTR(err); | 296 | return ERR_PTR(err); |
293 | } | 297 | } |
294 | 298 | ||
@@ -301,7 +305,7 @@ out_no_clnt: | |||
301 | int | 305 | int |
302 | rpc_shutdown_client(struct rpc_clnt *clnt) | 306 | rpc_shutdown_client(struct rpc_clnt *clnt) |
303 | { | 307 | { |
304 | dprintk("RPC: shutting down %s client for %s, tasks=%d\n", | 308 | dprintk("RPC: shutting down %s client for %s, tasks=%d\n", |
305 | clnt->cl_protname, clnt->cl_server, | 309 | clnt->cl_protname, clnt->cl_server, |
306 | atomic_read(&clnt->cl_users)); | 310 | atomic_read(&clnt->cl_users)); |
307 | 311 | ||
@@ -336,7 +340,7 @@ rpc_destroy_client(struct rpc_clnt *clnt) | |||
336 | return 1; | 340 | return 1; |
337 | BUG_ON(atomic_read(&clnt->cl_users) != 0); | 341 | BUG_ON(atomic_read(&clnt->cl_users) != 0); |
338 | 342 | ||
339 | dprintk("RPC: destroying %s client for %s\n", | 343 | dprintk("RPC: destroying %s client for %s\n", |
340 | clnt->cl_protname, clnt->cl_server); | 344 | clnt->cl_protname, clnt->cl_server); |
341 | if (clnt->cl_auth) { | 345 | if (clnt->cl_auth) { |
342 | rpcauth_destroy(clnt->cl_auth); | 346 | rpcauth_destroy(clnt->cl_auth); |
@@ -366,8 +370,8 @@ out_free: | |||
366 | void | 370 | void |
367 | rpc_release_client(struct rpc_clnt *clnt) | 371 | rpc_release_client(struct rpc_clnt *clnt) |
368 | { | 372 | { |
369 | dprintk("RPC: rpc_release_client(%p, %d)\n", | 373 | dprintk("RPC: rpc_release_client(%p, %d)\n", |
370 | clnt, atomic_read(&clnt->cl_users)); | 374 | clnt, atomic_read(&clnt->cl_users)); |
371 | 375 | ||
372 | if (!atomic_dec_and_test(&clnt->cl_users)) | 376 | if (!atomic_dec_and_test(&clnt->cl_users)) |
373 | return; | 377 | return; |
@@ -654,9 +658,10 @@ call_start(struct rpc_task *task) | |||
654 | { | 658 | { |
655 | struct rpc_clnt *clnt = task->tk_client; | 659 | struct rpc_clnt *clnt = task->tk_client; |
656 | 660 | ||
657 | dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid, | 661 | dprintk("RPC: %5u call_start %s%d proc %d (%s)\n", task->tk_pid, |
658 | clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc, | 662 | clnt->cl_protname, clnt->cl_vers, |
659 | (RPC_IS_ASYNC(task) ? "async" : "sync")); | 663 | task->tk_msg.rpc_proc->p_proc, |
664 | (RPC_IS_ASYNC(task) ? "async" : "sync")); | ||
660 | 665 | ||
661 | /* Increment call count */ | 666 | /* Increment call count */ |
662 | task->tk_msg.rpc_proc->p_count++; | 667 | task->tk_msg.rpc_proc->p_count++; |
@@ -670,7 +675,7 @@ call_start(struct rpc_task *task) | |||
670 | static void | 675 | static void |
671 | call_reserve(struct rpc_task *task) | 676 | call_reserve(struct rpc_task *task) |
672 | { | 677 | { |
673 | dprintk("RPC: %4d call_reserve\n", task->tk_pid); | 678 | dprint_status(task); |
674 | 679 | ||
675 | if (!rpcauth_uptodatecred(task)) { | 680 | if (!rpcauth_uptodatecred(task)) { |
676 | task->tk_action = call_refresh; | 681 | task->tk_action = call_refresh; |
@@ -690,8 +695,7 @@ call_reserveresult(struct rpc_task *task) | |||
690 | { | 695 | { |
691 | int status = task->tk_status; | 696 | int status = task->tk_status; |
692 | 697 | ||
693 | dprintk("RPC: %4d call_reserveresult (status %d)\n", | 698 | dprint_status(task); |
694 | task->tk_pid, task->tk_status); | ||
695 | 699 | ||
696 | /* | 700 | /* |
697 | * After a call to xprt_reserve(), we must have either | 701 | * After a call to xprt_reserve(), we must have either |
@@ -745,8 +749,8 @@ call_allocate(struct rpc_task *task) | |||
745 | struct rpc_xprt *xprt = task->tk_xprt; | 749 | struct rpc_xprt *xprt = task->tk_xprt; |
746 | unsigned int bufsiz; | 750 | unsigned int bufsiz; |
747 | 751 | ||
748 | dprintk("RPC: %4d call_allocate (status %d)\n", | 752 | dprint_status(task); |
749 | task->tk_pid, task->tk_status); | 753 | |
750 | task->tk_action = call_bind; | 754 | task->tk_action = call_bind; |
751 | if (req->rq_buffer) | 755 | if (req->rq_buffer) |
752 | return; | 756 | return; |
@@ -757,7 +761,8 @@ call_allocate(struct rpc_task *task) | |||
757 | 761 | ||
758 | if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL) | 762 | if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL) |
759 | return; | 763 | return; |
760 | printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task); | 764 | |
765 | dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); | ||
761 | 766 | ||
762 | if (RPC_IS_ASYNC(task) || !signalled()) { | 767 | if (RPC_IS_ASYNC(task) || !signalled()) { |
763 | xprt_release(task); | 768 | xprt_release(task); |
@@ -794,8 +799,7 @@ call_encode(struct rpc_task *task) | |||
794 | kxdrproc_t encode; | 799 | kxdrproc_t encode; |
795 | __be32 *p; | 800 | __be32 *p; |
796 | 801 | ||
797 | dprintk("RPC: %4d call_encode (status %d)\n", | 802 | dprint_status(task); |
798 | task->tk_pid, task->tk_status); | ||
799 | 803 | ||
800 | /* Default buffer setup */ | 804 | /* Default buffer setup */ |
801 | bufsiz = req->rq_bufsize >> 1; | 805 | bufsiz = req->rq_bufsize >> 1; |
@@ -841,8 +845,7 @@ call_bind(struct rpc_task *task) | |||
841 | { | 845 | { |
842 | struct rpc_xprt *xprt = task->tk_xprt; | 846 | struct rpc_xprt *xprt = task->tk_xprt; |
843 | 847 | ||
844 | dprintk("RPC: %4d call_bind (status %d)\n", | 848 | dprint_status(task); |
845 | task->tk_pid, task->tk_status); | ||
846 | 849 | ||
847 | task->tk_action = call_connect; | 850 | task->tk_action = call_connect; |
848 | if (!xprt_bound(xprt)) { | 851 | if (!xprt_bound(xprt)) { |
@@ -861,8 +864,7 @@ call_bind_status(struct rpc_task *task) | |||
861 | int status = -EACCES; | 864 | int status = -EACCES; |
862 | 865 | ||
863 | if (task->tk_status >= 0) { | 866 | if (task->tk_status >= 0) { |
864 | dprintk("RPC: %4d call_bind_status (status %d)\n", | 867 | dprint_status(task); |
865 | task->tk_pid, task->tk_status); | ||
866 | task->tk_status = 0; | 868 | task->tk_status = 0; |
867 | task->tk_action = call_connect; | 869 | task->tk_action = call_connect; |
868 | return; | 870 | return; |
@@ -870,24 +872,24 @@ call_bind_status(struct rpc_task *task) | |||
870 | 872 | ||
871 | switch (task->tk_status) { | 873 | switch (task->tk_status) { |
872 | case -EACCES: | 874 | case -EACCES: |
873 | dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n", | 875 | dprintk("RPC: %5u remote rpcbind: RPC program/version " |
874 | task->tk_pid); | 876 | "unavailable\n", task->tk_pid); |
875 | rpc_delay(task, 3*HZ); | 877 | rpc_delay(task, 3*HZ); |
876 | goto retry_timeout; | 878 | goto retry_timeout; |
877 | case -ETIMEDOUT: | 879 | case -ETIMEDOUT: |
878 | dprintk("RPC: %4d rpcbind request timed out\n", | 880 | dprintk("RPC: %5u rpcbind request timed out\n", |
879 | task->tk_pid); | 881 | task->tk_pid); |
880 | goto retry_timeout; | 882 | goto retry_timeout; |
881 | case -EPFNOSUPPORT: | 883 | case -EPFNOSUPPORT: |
882 | dprintk("RPC: %4d remote rpcbind service unavailable\n", | 884 | dprintk("RPC: %5u remote rpcbind service unavailable\n", |
883 | task->tk_pid); | 885 | task->tk_pid); |
884 | break; | 886 | break; |
885 | case -EPROTONOSUPPORT: | 887 | case -EPROTONOSUPPORT: |
886 | dprintk("RPC: %4d remote rpcbind version 2 unavailable\n", | 888 | dprintk("RPC: %5u remote rpcbind version 2 unavailable\n", |
887 | task->tk_pid); | 889 | task->tk_pid); |
888 | break; | 890 | break; |
889 | default: | 891 | default: |
890 | dprintk("RPC: %4d unrecognized rpcbind error (%d)\n", | 892 | dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", |
891 | task->tk_pid, -task->tk_status); | 893 | task->tk_pid, -task->tk_status); |
892 | status = -EIO; | 894 | status = -EIO; |
893 | } | 895 | } |
@@ -907,7 +909,7 @@ call_connect(struct rpc_task *task) | |||
907 | { | 909 | { |
908 | struct rpc_xprt *xprt = task->tk_xprt; | 910 | struct rpc_xprt *xprt = task->tk_xprt; |
909 | 911 | ||
910 | dprintk("RPC: %4d call_connect xprt %p %s connected\n", | 912 | dprintk("RPC: %5u call_connect xprt %p %s connected\n", |
911 | task->tk_pid, xprt, | 913 | task->tk_pid, xprt, |
912 | (xprt_connected(xprt) ? "is" : "is not")); | 914 | (xprt_connected(xprt) ? "is" : "is not")); |
913 | 915 | ||
@@ -929,8 +931,7 @@ call_connect_status(struct rpc_task *task) | |||
929 | struct rpc_clnt *clnt = task->tk_client; | 931 | struct rpc_clnt *clnt = task->tk_client; |
930 | int status = task->tk_status; | 932 | int status = task->tk_status; |
931 | 933 | ||
932 | dprintk("RPC: %5u call_connect_status (status %d)\n", | 934 | dprint_status(task); |
933 | task->tk_pid, task->tk_status); | ||
934 | 935 | ||
935 | task->tk_status = 0; | 936 | task->tk_status = 0; |
936 | if (status >= 0) { | 937 | if (status >= 0) { |
@@ -962,8 +963,7 @@ call_connect_status(struct rpc_task *task) | |||
962 | static void | 963 | static void |
963 | call_transmit(struct rpc_task *task) | 964 | call_transmit(struct rpc_task *task) |
964 | { | 965 | { |
965 | dprintk("RPC: %4d call_transmit (status %d)\n", | 966 | dprint_status(task); |
966 | task->tk_pid, task->tk_status); | ||
967 | 967 | ||
968 | task->tk_action = call_status; | 968 | task->tk_action = call_status; |
969 | if (task->tk_status < 0) | 969 | if (task->tk_status < 0) |
@@ -1024,8 +1024,7 @@ call_status(struct rpc_task *task) | |||
1024 | if (req->rq_received > 0 && !req->rq_bytes_sent) | 1024 | if (req->rq_received > 0 && !req->rq_bytes_sent) |
1025 | task->tk_status = req->rq_received; | 1025 | task->tk_status = req->rq_received; |
1026 | 1026 | ||
1027 | dprintk("RPC: %4d call_status (status %d)\n", | 1027 | dprint_status(task); |
1028 | task->tk_pid, task->tk_status); | ||
1029 | 1028 | ||
1030 | status = task->tk_status; | 1029 | status = task->tk_status; |
1031 | if (status >= 0) { | 1030 | if (status >= 0) { |
@@ -1076,11 +1075,11 @@ call_timeout(struct rpc_task *task) | |||
1076 | struct rpc_clnt *clnt = task->tk_client; | 1075 | struct rpc_clnt *clnt = task->tk_client; |
1077 | 1076 | ||
1078 | if (xprt_adjust_timeout(task->tk_rqstp) == 0) { | 1077 | if (xprt_adjust_timeout(task->tk_rqstp) == 0) { |
1079 | dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid); | 1078 | dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid); |
1080 | goto retry; | 1079 | goto retry; |
1081 | } | 1080 | } |
1082 | 1081 | ||
1083 | dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid); | 1082 | dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); |
1084 | task->tk_timeouts++; | 1083 | task->tk_timeouts++; |
1085 | 1084 | ||
1086 | if (RPC_IS_SOFT(task)) { | 1085 | if (RPC_IS_SOFT(task)) { |
@@ -1114,8 +1113,8 @@ call_decode(struct rpc_task *task) | |||
1114 | kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; | 1113 | kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; |
1115 | __be32 *p; | 1114 | __be32 *p; |
1116 | 1115 | ||
1117 | dprintk("RPC: %4d call_decode (status %d)\n", | 1116 | dprintk("RPC: %5u call_decode (status %d)\n", |
1118 | task->tk_pid, task->tk_status); | 1117 | task->tk_pid, task->tk_status); |
1119 | 1118 | ||
1120 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { | 1119 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { |
1121 | printk(KERN_NOTICE "%s: server %s OK\n", | 1120 | printk(KERN_NOTICE "%s: server %s OK\n", |
@@ -1129,8 +1128,8 @@ call_decode(struct rpc_task *task) | |||
1129 | clnt->cl_stats->rpcretrans++; | 1128 | clnt->cl_stats->rpcretrans++; |
1130 | goto out_retry; | 1129 | goto out_retry; |
1131 | } | 1130 | } |
1132 | dprintk("%s: too small RPC reply size (%d bytes)\n", | 1131 | dprintk("RPC: %s: too small RPC reply size (%d bytes)\n", |
1133 | clnt->cl_protname, task->tk_status); | 1132 | clnt->cl_protname, task->tk_status); |
1134 | task->tk_action = call_timeout; | 1133 | task->tk_action = call_timeout; |
1135 | goto out_retry; | 1134 | goto out_retry; |
1136 | } | 1135 | } |
@@ -1162,8 +1161,8 @@ call_decode(struct rpc_task *task) | |||
1162 | task->tk_msg.rpc_resp); | 1161 | task->tk_msg.rpc_resp); |
1163 | unlock_kernel(); | 1162 | unlock_kernel(); |
1164 | } | 1163 | } |
1165 | dprintk("RPC: %4d call_decode result %d\n", task->tk_pid, | 1164 | dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, |
1166 | task->tk_status); | 1165 | task->tk_status); |
1167 | return; | 1166 | return; |
1168 | out_retry: | 1167 | out_retry: |
1169 | req->rq_received = req->rq_private_buf.len = 0; | 1168 | req->rq_received = req->rq_private_buf.len = 0; |
@@ -1176,7 +1175,7 @@ out_retry: | |||
1176 | static void | 1175 | static void |
1177 | call_refresh(struct rpc_task *task) | 1176 | call_refresh(struct rpc_task *task) |
1178 | { | 1177 | { |
1179 | dprintk("RPC: %4d call_refresh\n", task->tk_pid); | 1178 | dprint_status(task); |
1180 | 1179 | ||
1181 | xprt_release(task); /* Must do to obtain new XID */ | 1180 | xprt_release(task); /* Must do to obtain new XID */ |
1182 | task->tk_action = call_refreshresult; | 1181 | task->tk_action = call_refreshresult; |
@@ -1192,8 +1191,8 @@ static void | |||
1192 | call_refreshresult(struct rpc_task *task) | 1191 | call_refreshresult(struct rpc_task *task) |
1193 | { | 1192 | { |
1194 | int status = task->tk_status; | 1193 | int status = task->tk_status; |
1195 | dprintk("RPC: %4d call_refreshresult (status %d)\n", | 1194 | |
1196 | task->tk_pid, task->tk_status); | 1195 | dprint_status(task); |
1197 | 1196 | ||
1198 | task->tk_status = 0; | 1197 | task->tk_status = 0; |
1199 | task->tk_action = call_reserve; | 1198 | task->tk_action = call_reserve; |
@@ -1271,11 +1270,15 @@ call_verify(struct rpc_task *task) | |||
1271 | case RPC_AUTH_ERROR: | 1270 | case RPC_AUTH_ERROR: |
1272 | break; | 1271 | break; |
1273 | case RPC_MISMATCH: | 1272 | case RPC_MISMATCH: |
1274 | dprintk("%s: RPC call version mismatch!\n", __FUNCTION__); | 1273 | dprintk("RPC: %5u %s: RPC call version " |
1274 | "mismatch!\n", | ||
1275 | task->tk_pid, __FUNCTION__); | ||
1275 | error = -EPROTONOSUPPORT; | 1276 | error = -EPROTONOSUPPORT; |
1276 | goto out_err; | 1277 | goto out_err; |
1277 | default: | 1278 | default: |
1278 | dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n); | 1279 | dprintk("RPC: %5u %s: RPC call rejected, " |
1280 | "unknown error: %x\n", | ||
1281 | task->tk_pid, __FUNCTION__, n); | ||
1279 | goto out_eio; | 1282 | goto out_eio; |
1280 | } | 1283 | } |
1281 | if (--len < 0) | 1284 | if (--len < 0) |
@@ -1288,8 +1291,8 @@ call_verify(struct rpc_task *task) | |||
1288 | if (!task->tk_cred_retry) | 1291 | if (!task->tk_cred_retry) |
1289 | break; | 1292 | break; |
1290 | task->tk_cred_retry--; | 1293 | task->tk_cred_retry--; |
1291 | dprintk("RPC: %4d call_verify: retry stale creds\n", | 1294 | dprintk("RPC: %5u %s: retry stale creds\n", |
1292 | task->tk_pid); | 1295 | task->tk_pid, __FUNCTION__); |
1293 | rpcauth_invalcred(task); | 1296 | rpcauth_invalcred(task); |
1294 | task->tk_action = call_refresh; | 1297 | task->tk_action = call_refresh; |
1295 | goto out_retry; | 1298 | goto out_retry; |
@@ -1299,8 +1302,8 @@ call_verify(struct rpc_task *task) | |||
1299 | if (!task->tk_garb_retry) | 1302 | if (!task->tk_garb_retry) |
1300 | break; | 1303 | break; |
1301 | task->tk_garb_retry--; | 1304 | task->tk_garb_retry--; |
1302 | dprintk("RPC: %4d call_verify: retry garbled creds\n", | 1305 | dprintk("RPC: %5u %s: retry garbled creds\n", |
1303 | task->tk_pid); | 1306 | task->tk_pid, __FUNCTION__); |
1304 | task->tk_action = call_bind; | 1307 | task->tk_action = call_bind; |
1305 | goto out_retry; | 1308 | goto out_retry; |
1306 | case RPC_AUTH_TOOWEAK: | 1309 | case RPC_AUTH_TOOWEAK: |
@@ -1311,8 +1314,8 @@ call_verify(struct rpc_task *task) | |||
1311 | printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); | 1314 | printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); |
1312 | error = -EIO; | 1315 | error = -EIO; |
1313 | } | 1316 | } |
1314 | dprintk("RPC: %4d call_verify: call rejected %d\n", | 1317 | dprintk("RPC: %5u %s: call rejected %d\n", |
1315 | task->tk_pid, n); | 1318 | task->tk_pid, __FUNCTION__, n); |
1316 | goto out_err; | 1319 | goto out_err; |
1317 | } | 1320 | } |
1318 | if (!(p = rpcauth_checkverf(task, p))) { | 1321 | if (!(p = rpcauth_checkverf(task, p))) { |
@@ -1326,20 +1329,24 @@ call_verify(struct rpc_task *task) | |||
1326 | case RPC_SUCCESS: | 1329 | case RPC_SUCCESS: |
1327 | return p; | 1330 | return p; |
1328 | case RPC_PROG_UNAVAIL: | 1331 | case RPC_PROG_UNAVAIL: |
1329 | dprintk("RPC: call_verify: program %u is unsupported by server %s\n", | 1332 | dprintk("RPC: %5u %s: program %u is unsupported by server %s\n", |
1333 | task->tk_pid, __FUNCTION__, | ||
1330 | (unsigned int)task->tk_client->cl_prog, | 1334 | (unsigned int)task->tk_client->cl_prog, |
1331 | task->tk_client->cl_server); | 1335 | task->tk_client->cl_server); |
1332 | error = -EPFNOSUPPORT; | 1336 | error = -EPFNOSUPPORT; |
1333 | goto out_err; | 1337 | goto out_err; |
1334 | case RPC_PROG_MISMATCH: | 1338 | case RPC_PROG_MISMATCH: |
1335 | dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n", | 1339 | dprintk("RPC: %5u %s: program %u, version %u unsupported by " |
1340 | "server %s\n", task->tk_pid, __FUNCTION__, | ||
1336 | (unsigned int)task->tk_client->cl_prog, | 1341 | (unsigned int)task->tk_client->cl_prog, |
1337 | (unsigned int)task->tk_client->cl_vers, | 1342 | (unsigned int)task->tk_client->cl_vers, |
1338 | task->tk_client->cl_server); | 1343 | task->tk_client->cl_server); |
1339 | error = -EPROTONOSUPPORT; | 1344 | error = -EPROTONOSUPPORT; |
1340 | goto out_err; | 1345 | goto out_err; |
1341 | case RPC_PROC_UNAVAIL: | 1346 | case RPC_PROC_UNAVAIL: |
1342 | dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n", | 1347 | dprintk("RPC: %5u %s: proc %p unsupported by program %u, " |
1348 | "version %u on server %s\n", | ||
1349 | task->tk_pid, __FUNCTION__, | ||
1343 | task->tk_msg.rpc_proc, | 1350 | task->tk_msg.rpc_proc, |
1344 | task->tk_client->cl_prog, | 1351 | task->tk_client->cl_prog, |
1345 | task->tk_client->cl_vers, | 1352 | task->tk_client->cl_vers, |
@@ -1347,7 +1354,8 @@ call_verify(struct rpc_task *task) | |||
1347 | error = -EOPNOTSUPP; | 1354 | error = -EOPNOTSUPP; |
1348 | goto out_err; | 1355 | goto out_err; |
1349 | case RPC_GARBAGE_ARGS: | 1356 | case RPC_GARBAGE_ARGS: |
1350 | dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__); | 1357 | dprintk("RPC: %5u %s: server saw garbage\n", |
1358 | task->tk_pid, __FUNCTION__); | ||
1351 | break; /* retry */ | 1359 | break; /* retry */ |
1352 | default: | 1360 | default: |
1353 | printk(KERN_WARNING "call_verify: server accept status: %x\n", n); | 1361 | printk(KERN_WARNING "call_verify: server accept status: %x\n", n); |
@@ -1358,7 +1366,8 @@ out_garbage: | |||
1358 | task->tk_client->cl_stats->rpcgarbage++; | 1366 | task->tk_client->cl_stats->rpcgarbage++; |
1359 | if (task->tk_garb_retry) { | 1367 | if (task->tk_garb_retry) { |
1360 | task->tk_garb_retry--; | 1368 | task->tk_garb_retry--; |
1361 | dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid); | 1369 | dprintk("RPC: %5u %s: retrying\n", |
1370 | task->tk_pid, __FUNCTION__); | ||
1362 | task->tk_action = call_bind; | 1371 | task->tk_action = call_bind; |
1363 | out_retry: | 1372 | out_retry: |
1364 | return ERR_PTR(-EAGAIN); | 1373 | return ERR_PTR(-EAGAIN); |
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index 76e59e9b8fb8..12ab4ec5fc79 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -97,7 +97,7 @@ void rpc_getport(struct rpc_task *task) | |||
97 | struct rpc_task *child; | 97 | struct rpc_task *child; |
98 | int status; | 98 | int status; |
99 | 99 | ||
100 | dprintk("RPC: %4d rpc_getport(%s, %u, %u, %d)\n", | 100 | dprintk("RPC: %5u rpc_getport(%s, %u, %u, %d)\n", |
101 | task->tk_pid, clnt->cl_server, | 101 | task->tk_pid, clnt->cl_server, |
102 | clnt->cl_prog, clnt->cl_vers, xprt->prot); | 102 | clnt->cl_prog, clnt->cl_vers, xprt->prot); |
103 | 103 | ||
@@ -178,7 +178,7 @@ int rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int pr | |||
178 | char hostname[32]; | 178 | char hostname[32]; |
179 | int status; | 179 | int status; |
180 | 180 | ||
181 | dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", | 181 | dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", |
182 | NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); | 182 | NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); |
183 | 183 | ||
184 | sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); | 184 | sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); |
@@ -221,7 +221,7 @@ static void pmap_getport_done(struct rpc_task *child, void *data) | |||
221 | status = 0; | 221 | status = 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | dprintk("RPC: %4d pmap_getport_done(status %d, port %u)\n", | 224 | dprintk("RPC: %5u pmap_getport_done(status %d, port %u)\n", |
225 | child->tk_pid, status, map->pm_port); | 225 | child->tk_pid, status, map->pm_port); |
226 | 226 | ||
227 | pmap_wake_portmap_waiters(xprt, status); | 227 | pmap_wake_portmap_waiters(xprt, status); |
@@ -257,13 +257,14 @@ int rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
257 | struct rpc_clnt *pmap_clnt; | 257 | struct rpc_clnt *pmap_clnt; |
258 | int error = 0; | 258 | int error = 0; |
259 | 259 | ||
260 | dprintk("RPC: registering (%u, %u, %d, %u) with portmapper.\n", | 260 | dprintk("RPC: registering (%u, %u, %d, %u) with portmapper.\n", |
261 | prog, vers, prot, port); | 261 | prog, vers, prot, port); |
262 | 262 | ||
263 | pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); | 263 | pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); |
264 | if (IS_ERR(pmap_clnt)) { | 264 | if (IS_ERR(pmap_clnt)) { |
265 | error = PTR_ERR(pmap_clnt); | 265 | error = PTR_ERR(pmap_clnt); |
266 | dprintk("RPC: couldn't create pmap client. Error = %d\n", error); | 266 | dprintk("RPC: couldn't create pmap client. Error = %d\n", |
267 | error); | ||
267 | return error; | 268 | return error; |
268 | } | 269 | } |
269 | 270 | ||
@@ -274,7 +275,7 @@ int rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
274 | "RPC: failed to contact portmap (errno %d).\n", | 275 | "RPC: failed to contact portmap (errno %d).\n", |
275 | error); | 276 | error); |
276 | } | 277 | } |
277 | dprintk("RPC: registration status %d/%d\n", error, *okay); | 278 | dprintk("RPC: registration status %d/%d\n", error, *okay); |
278 | 279 | ||
279 | /* Client deleted automatically because cl_oneshot == 1 */ | 280 | /* Client deleted automatically because cl_oneshot == 1 */ |
280 | return error; | 281 | return error; |
@@ -305,8 +306,9 @@ static struct rpc_clnt *pmap_create(char *hostname, struct sockaddr_in *srvaddr, | |||
305 | */ | 306 | */ |
306 | static int xdr_encode_mapping(struct rpc_rqst *req, __be32 *p, struct portmap_args *map) | 307 | static int xdr_encode_mapping(struct rpc_rqst *req, __be32 *p, struct portmap_args *map) |
307 | { | 308 | { |
308 | dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n", | 309 | dprintk("RPC: xdr_encode_mapping(%u, %u, %u, %u)\n", |
309 | map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port); | 310 | map->pm_prog, map->pm_vers, |
311 | map->pm_prot, map->pm_port); | ||
310 | *p++ = htonl(map->pm_prog); | 312 | *p++ = htonl(map->pm_prog); |
311 | *p++ = htonl(map->pm_vers); | 313 | *p++ = htonl(map->pm_vers); |
312 | *p++ = htonl(map->pm_prot); | 314 | *p++ = htonl(map->pm_prot); |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 13ab0c6fed01..ca56b8e9b649 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(rpc_sched_lock); | |||
74 | static inline void | 74 | static inline void |
75 | __rpc_disable_timer(struct rpc_task *task) | 75 | __rpc_disable_timer(struct rpc_task *task) |
76 | { | 76 | { |
77 | dprintk("RPC: %4d disabling timer\n", task->tk_pid); | 77 | dprintk("RPC: %5u disabling timer\n", task->tk_pid); |
78 | task->tk_timeout_fn = NULL; | 78 | task->tk_timeout_fn = NULL; |
79 | task->tk_timeout = 0; | 79 | task->tk_timeout = 0; |
80 | } | 80 | } |
@@ -93,7 +93,7 @@ static void rpc_run_timer(struct rpc_task *task) | |||
93 | callback = task->tk_timeout_fn; | 93 | callback = task->tk_timeout_fn; |
94 | task->tk_timeout_fn = NULL; | 94 | task->tk_timeout_fn = NULL; |
95 | if (callback && RPC_IS_QUEUED(task)) { | 95 | if (callback && RPC_IS_QUEUED(task)) { |
96 | dprintk("RPC: %4d running timer\n", task->tk_pid); | 96 | dprintk("RPC: %5u running timer\n", task->tk_pid); |
97 | callback(task); | 97 | callback(task); |
98 | } | 98 | } |
99 | smp_mb__before_clear_bit(); | 99 | smp_mb__before_clear_bit(); |
@@ -110,7 +110,7 @@ __rpc_add_timer(struct rpc_task *task, rpc_action timer) | |||
110 | if (!task->tk_timeout) | 110 | if (!task->tk_timeout) |
111 | return; | 111 | return; |
112 | 112 | ||
113 | dprintk("RPC: %4d setting alarm for %lu ms\n", | 113 | dprintk("RPC: %5u setting alarm for %lu ms\n", |
114 | task->tk_pid, task->tk_timeout * 1000 / HZ); | 114 | task->tk_pid, task->tk_timeout * 1000 / HZ); |
115 | 115 | ||
116 | if (timer) | 116 | if (timer) |
@@ -132,7 +132,7 @@ rpc_delete_timer(struct rpc_task *task) | |||
132 | return; | 132 | return; |
133 | if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) { | 133 | if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) { |
134 | del_singleshot_timer_sync(&task->tk_timer); | 134 | del_singleshot_timer_sync(&task->tk_timer); |
135 | dprintk("RPC: %4d deleting timer\n", task->tk_pid); | 135 | dprintk("RPC: %5u deleting timer\n", task->tk_pid); |
136 | } | 136 | } |
137 | } | 137 | } |
138 | 138 | ||
@@ -179,8 +179,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task * | |||
179 | queue->qlen++; | 179 | queue->qlen++; |
180 | rpc_set_queued(task); | 180 | rpc_set_queued(task); |
181 | 181 | ||
182 | dprintk("RPC: %4d added to queue %p \"%s\"\n", | 182 | dprintk("RPC: %5u added to queue %p \"%s\"\n", |
183 | task->tk_pid, queue, rpc_qname(queue)); | 183 | task->tk_pid, queue, rpc_qname(queue)); |
184 | } | 184 | } |
185 | 185 | ||
186 | /* | 186 | /* |
@@ -212,8 +212,8 @@ static void __rpc_remove_wait_queue(struct rpc_task *task) | |||
212 | else | 212 | else |
213 | list_del(&task->u.tk_wait.list); | 213 | list_del(&task->u.tk_wait.list); |
214 | queue->qlen--; | 214 | queue->qlen--; |
215 | dprintk("RPC: %4d removed from queue %p \"%s\"\n", | 215 | dprintk("RPC: %5u removed from queue %p \"%s\"\n", |
216 | task->tk_pid, queue, rpc_qname(queue)); | 216 | task->tk_pid, queue, rpc_qname(queue)); |
217 | } | 217 | } |
218 | 218 | ||
219 | static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) | 219 | static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) |
@@ -344,8 +344,8 @@ static void rpc_make_runnable(struct rpc_task *task) | |||
344 | static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | 344 | static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, |
345 | rpc_action action, rpc_action timer) | 345 | rpc_action action, rpc_action timer) |
346 | { | 346 | { |
347 | dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid, | 347 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", |
348 | rpc_qname(q), jiffies); | 348 | task->tk_pid, rpc_qname(q), jiffies); |
349 | 349 | ||
350 | if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) { | 350 | if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) { |
351 | printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n"); | 351 | printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n"); |
@@ -381,7 +381,8 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |||
381 | */ | 381 | */ |
382 | static void __rpc_do_wake_up_task(struct rpc_task *task) | 382 | static void __rpc_do_wake_up_task(struct rpc_task *task) |
383 | { | 383 | { |
384 | dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies); | 384 | dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", |
385 | task->tk_pid, jiffies); | ||
385 | 386 | ||
386 | #ifdef RPC_DEBUG | 387 | #ifdef RPC_DEBUG |
387 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | 388 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); |
@@ -397,7 +398,7 @@ static void __rpc_do_wake_up_task(struct rpc_task *task) | |||
397 | 398 | ||
398 | rpc_make_runnable(task); | 399 | rpc_make_runnable(task); |
399 | 400 | ||
400 | dprintk("RPC: __rpc_wake_up_task done\n"); | 401 | dprintk("RPC: __rpc_wake_up_task done\n"); |
401 | } | 402 | } |
402 | 403 | ||
403 | /* | 404 | /* |
@@ -418,7 +419,7 @@ static void __rpc_wake_up_task(struct rpc_task *task) | |||
418 | static void | 419 | static void |
419 | __rpc_default_timer(struct rpc_task *task) | 420 | __rpc_default_timer(struct rpc_task *task) |
420 | { | 421 | { |
421 | dprintk("RPC: %d timeout (default timer)\n", task->tk_pid); | 422 | dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid); |
422 | task->tk_status = -ETIMEDOUT; | 423 | task->tk_status = -ETIMEDOUT; |
423 | rpc_wake_up_task(task); | 424 | rpc_wake_up_task(task); |
424 | } | 425 | } |
@@ -502,7 +503,8 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) | |||
502 | { | 503 | { |
503 | struct rpc_task *task = NULL; | 504 | struct rpc_task *task = NULL; |
504 | 505 | ||
505 | dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue)); | 506 | dprintk("RPC: wake_up_next(%p \"%s\")\n", |
507 | queue, rpc_qname(queue)); | ||
506 | rcu_read_lock_bh(); | 508 | rcu_read_lock_bh(); |
507 | spin_lock(&queue->lock); | 509 | spin_lock(&queue->lock); |
508 | if (RPC_IS_PRIORITY(queue)) | 510 | if (RPC_IS_PRIORITY(queue)) |
@@ -629,8 +631,8 @@ static void __rpc_execute(struct rpc_task *task) | |||
629 | { | 631 | { |
630 | int status = 0; | 632 | int status = 0; |
631 | 633 | ||
632 | dprintk("RPC: %4d rpc_execute flgs %x\n", | 634 | dprintk("RPC: %5u __rpc_execute flags=0x%x\n", |
633 | task->tk_pid, task->tk_flags); | 635 | task->tk_pid, task->tk_flags); |
634 | 636 | ||
635 | BUG_ON(RPC_IS_QUEUED(task)); | 637 | BUG_ON(RPC_IS_QUEUED(task)); |
636 | 638 | ||
@@ -686,7 +688,7 @@ static void __rpc_execute(struct rpc_task *task) | |||
686 | } | 688 | } |
687 | 689 | ||
688 | /* sync task: sleep here */ | 690 | /* sync task: sleep here */ |
689 | dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); | 691 | dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); |
690 | /* Note: Caller should be using rpc_clnt_sigmask() */ | 692 | /* Note: Caller should be using rpc_clnt_sigmask() */ |
691 | status = out_of_line_wait_on_bit(&task->tk_runstate, | 693 | status = out_of_line_wait_on_bit(&task->tk_runstate, |
692 | RPC_TASK_QUEUED, rpc_wait_bit_interruptible, | 694 | RPC_TASK_QUEUED, rpc_wait_bit_interruptible, |
@@ -698,16 +700,17 @@ static void __rpc_execute(struct rpc_task *task) | |||
698 | * clean up after sleeping on some queue, we don't | 700 | * clean up after sleeping on some queue, we don't |
699 | * break the loop here, but go around once more. | 701 | * break the loop here, but go around once more. |
700 | */ | 702 | */ |
701 | dprintk("RPC: %4d got signal\n", task->tk_pid); | 703 | dprintk("RPC: %5u got signal\n", task->tk_pid); |
702 | task->tk_flags |= RPC_TASK_KILLED; | 704 | task->tk_flags |= RPC_TASK_KILLED; |
703 | rpc_exit(task, -ERESTARTSYS); | 705 | rpc_exit(task, -ERESTARTSYS); |
704 | rpc_wake_up_task(task); | 706 | rpc_wake_up_task(task); |
705 | } | 707 | } |
706 | rpc_set_running(task); | 708 | rpc_set_running(task); |
707 | dprintk("RPC: %4d sync task resuming\n", task->tk_pid); | 709 | dprintk("RPC: %5u sync task resuming\n", task->tk_pid); |
708 | } | 710 | } |
709 | 711 | ||
710 | dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status); | 712 | dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status, |
713 | task->tk_status); | ||
711 | /* Release all resources associated with the task */ | 714 | /* Release all resources associated with the task */ |
712 | rpc_release_task(task); | 715 | rpc_release_task(task); |
713 | } | 716 | } |
@@ -824,7 +827,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons | |||
824 | /* starting timestamp */ | 827 | /* starting timestamp */ |
825 | task->tk_start = jiffies; | 828 | task->tk_start = jiffies; |
826 | 829 | ||
827 | dprintk("RPC: %4d new task procpid %d\n", task->tk_pid, | 830 | dprintk("RPC: new task initialized, procpid %u\n", |
828 | current->pid); | 831 | current->pid); |
829 | } | 832 | } |
830 | 833 | ||
@@ -837,7 +840,7 @@ rpc_alloc_task(void) | |||
837 | static void rpc_free_task(struct rcu_head *rcu) | 840 | static void rpc_free_task(struct rcu_head *rcu) |
838 | { | 841 | { |
839 | struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu); | 842 | struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu); |
840 | dprintk("RPC: %4d freeing task\n", task->tk_pid); | 843 | dprintk("RPC: %5u freeing task\n", task->tk_pid); |
841 | mempool_free(task, rpc_task_mempool); | 844 | mempool_free(task, rpc_task_mempool); |
842 | } | 845 | } |
843 | 846 | ||
@@ -856,7 +859,7 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc | |||
856 | 859 | ||
857 | rpc_init_task(task, clnt, flags, tk_ops, calldata); | 860 | rpc_init_task(task, clnt, flags, tk_ops, calldata); |
858 | 861 | ||
859 | dprintk("RPC: %4d allocated task\n", task->tk_pid); | 862 | dprintk("RPC: allocated task %p\n", task); |
860 | task->tk_flags |= RPC_TASK_DYNAMIC; | 863 | task->tk_flags |= RPC_TASK_DYNAMIC; |
861 | out: | 864 | out: |
862 | return task; | 865 | return task; |
@@ -900,7 +903,7 @@ static void rpc_release_task(struct rpc_task *task) | |||
900 | #ifdef RPC_DEBUG | 903 | #ifdef RPC_DEBUG |
901 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | 904 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); |
902 | #endif | 905 | #endif |
903 | dprintk("RPC: %4d release task\n", task->tk_pid); | 906 | dprintk("RPC: %5u release task\n", task->tk_pid); |
904 | 907 | ||
905 | /* Remove from global task list */ | 908 | /* Remove from global task list */ |
906 | spin_lock(&rpc_sched_lock); | 909 | spin_lock(&rpc_sched_lock); |
@@ -953,7 +956,7 @@ void rpc_killall_tasks(struct rpc_clnt *clnt) | |||
953 | struct rpc_task *rovr; | 956 | struct rpc_task *rovr; |
954 | struct list_head *le; | 957 | struct list_head *le; |
955 | 958 | ||
956 | dprintk("RPC: killing all tasks for client %p\n", clnt); | 959 | dprintk("RPC: killing all tasks for client %p\n", clnt); |
957 | 960 | ||
958 | /* | 961 | /* |
959 | * Spin lock all_tasks to prevent changes... | 962 | * Spin lock all_tasks to prevent changes... |
@@ -982,7 +985,8 @@ static void rpciod_killall(void) | |||
982 | rpc_killall_tasks(NULL); | 985 | rpc_killall_tasks(NULL); |
983 | flush_workqueue(rpciod_workqueue); | 986 | flush_workqueue(rpciod_workqueue); |
984 | if (!list_empty(&all_tasks)) { | 987 | if (!list_empty(&all_tasks)) { |
985 | dprintk("rpciod_killall: waiting for tasks to exit\n"); | 988 | dprintk("RPC: rpciod_killall: waiting for tasks " |
989 | "to exit\n"); | ||
986 | yield(); | 990 | yield(); |
987 | } | 991 | } |
988 | } | 992 | } |
@@ -1002,7 +1006,7 @@ rpciod_up(void) | |||
1002 | int error = 0; | 1006 | int error = 0; |
1003 | 1007 | ||
1004 | mutex_lock(&rpciod_mutex); | 1008 | mutex_lock(&rpciod_mutex); |
1005 | dprintk("rpciod_up: users %d\n", rpciod_users); | 1009 | dprintk("RPC: rpciod_up: users %u\n", rpciod_users); |
1006 | rpciod_users++; | 1010 | rpciod_users++; |
1007 | if (rpciod_workqueue) | 1011 | if (rpciod_workqueue) |
1008 | goto out; | 1012 | goto out; |
@@ -1010,7 +1014,7 @@ rpciod_up(void) | |||
1010 | * If there's no pid, we should be the first user. | 1014 | * If there's no pid, we should be the first user. |
1011 | */ | 1015 | */ |
1012 | if (rpciod_users > 1) | 1016 | if (rpciod_users > 1) |
1013 | printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users); | 1017 | printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users); |
1014 | /* | 1018 | /* |
1015 | * Create the rpciod thread and wait for it to start. | 1019 | * Create the rpciod thread and wait for it to start. |
1016 | */ | 1020 | */ |
@@ -1032,7 +1036,7 @@ void | |||
1032 | rpciod_down(void) | 1036 | rpciod_down(void) |
1033 | { | 1037 | { |
1034 | mutex_lock(&rpciod_mutex); | 1038 | mutex_lock(&rpciod_mutex); |
1035 | dprintk("rpciod_down sema %d\n", rpciod_users); | 1039 | dprintk("RPC: rpciod_down sema %u\n", rpciod_users); |
1036 | if (rpciod_users) { | 1040 | if (rpciod_users) { |
1037 | if (--rpciod_users) | 1041 | if (--rpciod_users) |
1038 | goto out; | 1042 | goto out; |
@@ -1040,7 +1044,7 @@ rpciod_down(void) | |||
1040 | printk(KERN_WARNING "rpciod_down: no users??\n"); | 1044 | printk(KERN_WARNING "rpciod_down: no users??\n"); |
1041 | 1045 | ||
1042 | if (!rpciod_workqueue) { | 1046 | if (!rpciod_workqueue) { |
1043 | dprintk("rpciod_down: Nothing to do!\n"); | 1047 | dprintk("RPC: rpciod_down: Nothing to do!\n"); |
1044 | goto out; | 1048 | goto out; |
1045 | } | 1049 | } |
1046 | rpciod_killall(); | 1050 | rpciod_killall(); |
@@ -1070,7 +1074,7 @@ void rpc_show_tasks(void) | |||
1070 | if (RPC_IS_QUEUED(t)) | 1074 | if (RPC_IS_QUEUED(t)) |
1071 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); | 1075 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); |
1072 | 1076 | ||
1073 | printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n", | 1077 | printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", |
1074 | t->tk_pid, | 1078 | t->tk_pid, |
1075 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), | 1079 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), |
1076 | t->tk_flags, t->tk_status, | 1080 | t->tk_flags, t->tk_status, |
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index bd98124c3a64..d19cd9ec6e99 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -226,7 +226,7 @@ do_register(const char *name, void *data, const struct file_operations *fops) | |||
226 | struct proc_dir_entry *ent; | 226 | struct proc_dir_entry *ent; |
227 | 227 | ||
228 | rpc_proc_init(); | 228 | rpc_proc_init(); |
229 | dprintk("RPC: registering /proc/net/rpc/%s\n", name); | 229 | dprintk("RPC: registering /proc/net/rpc/%s\n", name); |
230 | 230 | ||
231 | ent = create_proc_entry(name, 0, proc_net_rpc); | 231 | ent = create_proc_entry(name, 0, proc_net_rpc); |
232 | if (ent) { | 232 | if (ent) { |
@@ -263,7 +263,7 @@ svc_proc_unregister(const char *name) | |||
263 | void | 263 | void |
264 | rpc_proc_init(void) | 264 | rpc_proc_init(void) |
265 | { | 265 | { |
266 | dprintk("RPC: registering /proc/net/rpc\n"); | 266 | dprintk("RPC: registering /proc/net/rpc\n"); |
267 | if (!proc_net_rpc) { | 267 | if (!proc_net_rpc) { |
268 | struct proc_dir_entry *ent; | 268 | struct proc_dir_entry *ent; |
269 | ent = proc_mkdir("rpc", proc_net); | 269 | ent = proc_mkdir("rpc", proc_net); |
@@ -277,7 +277,7 @@ rpc_proc_init(void) | |||
277 | void | 277 | void |
278 | rpc_proc_exit(void) | 278 | rpc_proc_exit(void) |
279 | { | 279 | { |
280 | dprintk("RPC: unregistering /proc/net/rpc\n"); | 280 | dprintk("RPC: unregistering /proc/net/rpc\n"); |
281 | if (proc_net_rpc) { | 281 | if (proc_net_rpc) { |
282 | proc_net_rpc = NULL; | 282 | proc_net_rpc = NULL; |
283 | remove_proc_entry("net/rpc", NULL); | 283 | remove_proc_entry("net/rpc", NULL); |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 4c1611211119..b0fb6406d54f 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -317,7 +317,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, | |||
317 | for (i = 0; i < serv->sv_nrpools; i++) { | 317 | for (i = 0; i < serv->sv_nrpools; i++) { |
318 | struct svc_pool *pool = &serv->sv_pools[i]; | 318 | struct svc_pool *pool = &serv->sv_pools[i]; |
319 | 319 | ||
320 | dprintk("initialising pool %u for %s\n", | 320 | dprintk("svc: initialising pool %u for %s\n", |
321 | i, serv->sv_name); | 321 | i, serv->sv_name); |
322 | 322 | ||
323 | pool->sp_id = i; | 323 | pool->sp_id = i; |
@@ -368,7 +368,7 @@ svc_destroy(struct svc_serv *serv) | |||
368 | { | 368 | { |
369 | struct svc_sock *svsk; | 369 | struct svc_sock *svsk; |
370 | 370 | ||
371 | dprintk("RPC: svc_destroy(%s, %d)\n", | 371 | dprintk("svc: svc_destroy(%s, %d)\n", |
372 | serv->sv_program->pg_name, | 372 | serv->sv_program->pg_name, |
373 | serv->sv_nrthreads); | 373 | serv->sv_nrthreads); |
374 | 374 | ||
@@ -654,7 +654,7 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port) | |||
654 | if (progp->pg_vers[i] == NULL) | 654 | if (progp->pg_vers[i] == NULL) |
655 | continue; | 655 | continue; |
656 | 656 | ||
657 | dprintk("RPC: svc_register(%s, %s, %d, %d)%s\n", | 657 | dprintk("svc: svc_register(%s, %s, %d, %d)%s\n", |
658 | progp->pg_name, | 658 | progp->pg_name, |
659 | proto == IPPROTO_UDP? "udp" : "tcp", | 659 | proto == IPPROTO_UDP? "udp" : "tcp", |
660 | port, | 660 | port, |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 7a3999f0a4a2..cf59f7d315d0 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -108,7 +108,7 @@ int xprt_reserve_xprt(struct rpc_task *task) | |||
108 | return 1; | 108 | return 1; |
109 | 109 | ||
110 | out_sleep: | 110 | out_sleep: |
111 | dprintk("RPC: %4d failed to lock transport %p\n", | 111 | dprintk("RPC: %5u failed to lock transport %p\n", |
112 | task->tk_pid, xprt); | 112 | task->tk_pid, xprt); |
113 | task->tk_timeout = 0; | 113 | task->tk_timeout = 0; |
114 | task->tk_status = -EAGAIN; | 114 | task->tk_status = -EAGAIN; |
@@ -158,7 +158,7 @@ int xprt_reserve_xprt_cong(struct rpc_task *task) | |||
158 | } | 158 | } |
159 | xprt_clear_locked(xprt); | 159 | xprt_clear_locked(xprt); |
160 | out_sleep: | 160 | out_sleep: |
161 | dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); | 161 | dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); |
162 | task->tk_timeout = 0; | 162 | task->tk_timeout = 0; |
163 | task->tk_status = -EAGAIN; | 163 | task->tk_status = -EAGAIN; |
164 | if (req && req->rq_ntrans) | 164 | if (req && req->rq_ntrans) |
@@ -281,7 +281,7 @@ __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task) | |||
281 | 281 | ||
282 | if (req->rq_cong) | 282 | if (req->rq_cong) |
283 | return 1; | 283 | return 1; |
284 | dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n", | 284 | dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n", |
285 | task->tk_pid, xprt->cong, xprt->cwnd); | 285 | task->tk_pid, xprt->cong, xprt->cwnd); |
286 | if (RPCXPRT_CONGESTED(xprt)) | 286 | if (RPCXPRT_CONGESTED(xprt)) |
287 | return 0; | 287 | return 0; |
@@ -340,7 +340,7 @@ void xprt_adjust_cwnd(struct rpc_task *task, int result) | |||
340 | if (cwnd < RPC_CWNDSCALE) | 340 | if (cwnd < RPC_CWNDSCALE) |
341 | cwnd = RPC_CWNDSCALE; | 341 | cwnd = RPC_CWNDSCALE; |
342 | } | 342 | } |
343 | dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", | 343 | dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", |
344 | xprt->cong, xprt->cwnd, cwnd); | 344 | xprt->cong, xprt->cwnd, cwnd); |
345 | xprt->cwnd = cwnd; | 345 | xprt->cwnd = cwnd; |
346 | __xprt_put_cong(xprt, req); | 346 | __xprt_put_cong(xprt, req); |
@@ -387,8 +387,8 @@ void xprt_write_space(struct rpc_xprt *xprt) | |||
387 | 387 | ||
388 | spin_lock_bh(&xprt->transport_lock); | 388 | spin_lock_bh(&xprt->transport_lock); |
389 | if (xprt->snd_task) { | 389 | if (xprt->snd_task) { |
390 | dprintk("RPC: write space: waking waiting task on xprt %p\n", | 390 | dprintk("RPC: write space: waking waiting task on " |
391 | xprt); | 391 | "xprt %p\n", xprt); |
392 | rpc_wake_up_task(xprt->snd_task); | 392 | rpc_wake_up_task(xprt->snd_task); |
393 | } | 393 | } |
394 | spin_unlock_bh(&xprt->transport_lock); | 394 | spin_unlock_bh(&xprt->transport_lock); |
@@ -494,7 +494,7 @@ static void xprt_autoclose(struct work_struct *work) | |||
494 | */ | 494 | */ |
495 | void xprt_disconnect(struct rpc_xprt *xprt) | 495 | void xprt_disconnect(struct rpc_xprt *xprt) |
496 | { | 496 | { |
497 | dprintk("RPC: disconnected transport %p\n", xprt); | 497 | dprintk("RPC: disconnected transport %p\n", xprt); |
498 | spin_lock_bh(&xprt->transport_lock); | 498 | spin_lock_bh(&xprt->transport_lock); |
499 | xprt_clear_connected(xprt); | 499 | xprt_clear_connected(xprt); |
500 | xprt_wake_pending_tasks(xprt, -ENOTCONN); | 500 | xprt_wake_pending_tasks(xprt, -ENOTCONN); |
@@ -530,7 +530,7 @@ void xprt_connect(struct rpc_task *task) | |||
530 | { | 530 | { |
531 | struct rpc_xprt *xprt = task->tk_xprt; | 531 | struct rpc_xprt *xprt = task->tk_xprt; |
532 | 532 | ||
533 | dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid, | 533 | dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid, |
534 | xprt, (xprt_connected(xprt) ? "is" : "is not")); | 534 | xprt, (xprt_connected(xprt) ? "is" : "is not")); |
535 | 535 | ||
536 | if (!xprt_bound(xprt)) { | 536 | if (!xprt_bound(xprt)) { |
@@ -560,7 +560,7 @@ static void xprt_connect_status(struct rpc_task *task) | |||
560 | if (task->tk_status >= 0) { | 560 | if (task->tk_status >= 0) { |
561 | xprt->stat.connect_count++; | 561 | xprt->stat.connect_count++; |
562 | xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; | 562 | xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; |
563 | dprintk("RPC: %4d xprt_connect_status: connection established\n", | 563 | dprintk("RPC: %5u xprt_connect_status: connection established\n", |
564 | task->tk_pid); | 564 | task->tk_pid); |
565 | return; | 565 | return; |
566 | } | 566 | } |
@@ -568,20 +568,22 @@ static void xprt_connect_status(struct rpc_task *task) | |||
568 | switch (task->tk_status) { | 568 | switch (task->tk_status) { |
569 | case -ECONNREFUSED: | 569 | case -ECONNREFUSED: |
570 | case -ECONNRESET: | 570 | case -ECONNRESET: |
571 | dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n", | 571 | dprintk("RPC: %5u xprt_connect_status: server %s refused " |
572 | task->tk_pid, task->tk_client->cl_server); | 572 | "connection\n", task->tk_pid, |
573 | task->tk_client->cl_server); | ||
573 | break; | 574 | break; |
574 | case -ENOTCONN: | 575 | case -ENOTCONN: |
575 | dprintk("RPC: %4d xprt_connect_status: connection broken\n", | 576 | dprintk("RPC: %5u xprt_connect_status: connection broken\n", |
576 | task->tk_pid); | 577 | task->tk_pid); |
577 | break; | 578 | break; |
578 | case -ETIMEDOUT: | 579 | case -ETIMEDOUT: |
579 | dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n", | 580 | dprintk("RPC: %5u xprt_connect_status: connect attempt timed " |
580 | task->tk_pid); | 581 | "out\n", task->tk_pid); |
581 | break; | 582 | break; |
582 | default: | 583 | default: |
583 | dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n", | 584 | dprintk("RPC: %5u xprt_connect_status: error %d connecting to " |
584 | task->tk_pid, -task->tk_status, task->tk_client->cl_server); | 585 | "server %s\n", task->tk_pid, -task->tk_status, |
586 | task->tk_client->cl_server); | ||
585 | xprt_release_write(xprt, task); | 587 | xprt_release_write(xprt, task); |
586 | task->tk_status = -EIO; | 588 | task->tk_status = -EIO; |
587 | } | 589 | } |
@@ -602,6 +604,9 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) | |||
602 | if (entry->rq_xid == xid) | 604 | if (entry->rq_xid == xid) |
603 | return entry; | 605 | return entry; |
604 | } | 606 | } |
607 | |||
608 | dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n", | ||
609 | ntohl(xid)); | ||
605 | xprt->stat.bad_xids++; | 610 | xprt->stat.bad_xids++; |
606 | return NULL; | 611 | return NULL; |
607 | } | 612 | } |
@@ -654,7 +659,7 @@ static void xprt_timer(struct rpc_task *task) | |||
654 | struct rpc_rqst *req = task->tk_rqstp; | 659 | struct rpc_rqst *req = task->tk_rqstp; |
655 | struct rpc_xprt *xprt = req->rq_xprt; | 660 | struct rpc_xprt *xprt = req->rq_xprt; |
656 | 661 | ||
657 | dprintk("RPC: %4d xprt_timer\n", task->tk_pid); | 662 | dprintk("RPC: %5u xprt_timer\n", task->tk_pid); |
658 | 663 | ||
659 | spin_lock(&xprt->transport_lock); | 664 | spin_lock(&xprt->transport_lock); |
660 | if (!req->rq_received) { | 665 | if (!req->rq_received) { |
@@ -678,7 +683,7 @@ int xprt_prepare_transmit(struct rpc_task *task) | |||
678 | struct rpc_xprt *xprt = req->rq_xprt; | 683 | struct rpc_xprt *xprt = req->rq_xprt; |
679 | int err = 0; | 684 | int err = 0; |
680 | 685 | ||
681 | dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid); | 686 | dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid); |
682 | 687 | ||
683 | spin_lock_bh(&xprt->transport_lock); | 688 | spin_lock_bh(&xprt->transport_lock); |
684 | if (req->rq_received && !req->rq_bytes_sent) { | 689 | if (req->rq_received && !req->rq_bytes_sent) { |
@@ -716,7 +721,7 @@ void xprt_transmit(struct rpc_task *task) | |||
716 | struct rpc_xprt *xprt = req->rq_xprt; | 721 | struct rpc_xprt *xprt = req->rq_xprt; |
717 | int status; | 722 | int status; |
718 | 723 | ||
719 | dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); | 724 | dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); |
720 | 725 | ||
721 | if (!req->rq_received) { | 726 | if (!req->rq_received) { |
722 | if (list_empty(&req->rq_list)) { | 727 | if (list_empty(&req->rq_list)) { |
@@ -736,7 +741,7 @@ void xprt_transmit(struct rpc_task *task) | |||
736 | 741 | ||
737 | status = xprt->ops->send_request(task); | 742 | status = xprt->ops->send_request(task); |
738 | if (status == 0) { | 743 | if (status == 0) { |
739 | dprintk("RPC: %4d xmit complete\n", task->tk_pid); | 744 | dprintk("RPC: %5u xmit complete\n", task->tk_pid); |
740 | spin_lock_bh(&xprt->transport_lock); | 745 | spin_lock_bh(&xprt->transport_lock); |
741 | 746 | ||
742 | xprt->ops->set_retrans_timeout(task); | 747 | xprt->ops->set_retrans_timeout(task); |
@@ -777,7 +782,7 @@ static inline void do_xprt_reserve(struct rpc_task *task) | |||
777 | xprt_request_init(task, xprt); | 782 | xprt_request_init(task, xprt); |
778 | return; | 783 | return; |
779 | } | 784 | } |
780 | dprintk("RPC: waiting for request slot\n"); | 785 | dprintk("RPC: waiting for request slot\n"); |
781 | task->tk_status = -EAGAIN; | 786 | task->tk_status = -EAGAIN; |
782 | task->tk_timeout = 0; | 787 | task->tk_timeout = 0; |
783 | rpc_sleep_on(&xprt->backlog, task, NULL, NULL); | 788 | rpc_sleep_on(&xprt->backlog, task, NULL, NULL); |
@@ -822,7 +827,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) | |||
822 | req->rq_xid = xprt_alloc_xid(xprt); | 827 | req->rq_xid = xprt_alloc_xid(xprt); |
823 | req->rq_release_snd_buf = NULL; | 828 | req->rq_release_snd_buf = NULL; |
824 | xprt_reset_majortimeo(req); | 829 | xprt_reset_majortimeo(req); |
825 | dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, | 830 | dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, |
826 | req, ntohl(req->rq_xid)); | 831 | req, ntohl(req->rq_xid)); |
827 | } | 832 | } |
828 | 833 | ||
@@ -856,7 +861,7 @@ void xprt_release(struct rpc_task *task) | |||
856 | req->rq_release_snd_buf(req); | 861 | req->rq_release_snd_buf(req); |
857 | memset(req, 0, sizeof(*req)); /* mark unused */ | 862 | memset(req, 0, sizeof(*req)); /* mark unused */ |
858 | 863 | ||
859 | dprintk("RPC: %4d release request %p\n", task->tk_pid, req); | 864 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); |
860 | 865 | ||
861 | spin_lock(&xprt->reserve_lock); | 866 | spin_lock(&xprt->reserve_lock); |
862 | list_add(&req->rq_list, &xprt->free); | 867 | list_add(&req->rq_list, &xprt->free); |
@@ -906,7 +911,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si | |||
906 | return ERR_PTR(-EIO); | 911 | return ERR_PTR(-EIO); |
907 | } | 912 | } |
908 | if (IS_ERR(xprt)) { | 913 | if (IS_ERR(xprt)) { |
909 | dprintk("RPC: xprt_create_transport: failed, %ld\n", | 914 | dprintk("RPC: xprt_create_transport: failed, %ld\n", |
910 | -PTR_ERR(xprt)); | 915 | -PTR_ERR(xprt)); |
911 | return xprt; | 916 | return xprt; |
912 | } | 917 | } |
@@ -936,7 +941,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si | |||
936 | 941 | ||
937 | xprt_init_xid(xprt); | 942 | xprt_init_xid(xprt); |
938 | 943 | ||
939 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 944 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
940 | xprt->max_reqs); | 945 | xprt->max_reqs); |
941 | 946 | ||
942 | return xprt; | 947 | return xprt; |
@@ -951,7 +956,7 @@ static void xprt_destroy(struct kref *kref) | |||
951 | { | 956 | { |
952 | struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref); | 957 | struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref); |
953 | 958 | ||
954 | dprintk("RPC: destroying transport %p\n", xprt); | 959 | dprintk("RPC: destroying transport %p\n", xprt); |
955 | xprt->shutdown = 1; | 960 | xprt->shutdown = 1; |
956 | del_timer_sync(&xprt->timer); | 961 | del_timer_sync(&xprt->timer); |
957 | 962 | ||
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 49cabffd7fdb..64736b3a59a7 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -192,7 +192,7 @@ static void xs_pktdump(char *msg, u32 *packet, unsigned int count) | |||
192 | u8 *buf = (u8 *) packet; | 192 | u8 *buf = (u8 *) packet; |
193 | int j; | 193 | int j; |
194 | 194 | ||
195 | dprintk("RPC: %s\n", msg); | 195 | dprintk("RPC: %s\n", msg); |
196 | for (j = 0; j < count && j < 128; j += 4) { | 196 | for (j = 0; j < count && j < 128; j += 4) { |
197 | if (!(j & 31)) { | 197 | if (!(j & 31)) { |
198 | if (j) | 198 | if (j) |
@@ -418,7 +418,7 @@ static void xs_nospace(struct rpc_task *task) | |||
418 | struct rpc_xprt *xprt = req->rq_xprt; | 418 | struct rpc_xprt *xprt = req->rq_xprt; |
419 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 419 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
420 | 420 | ||
421 | dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", | 421 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", |
422 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, | 422 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, |
423 | req->rq_slen); | 423 | req->rq_slen); |
424 | 424 | ||
@@ -467,7 +467,7 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
467 | xprt->addrlen, xdr, | 467 | xprt->addrlen, xdr, |
468 | req->rq_bytes_sent); | 468 | req->rq_bytes_sent); |
469 | 469 | ||
470 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", | 470 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", |
471 | xdr->len - req->rq_bytes_sent, status); | 471 | xdr->len - req->rq_bytes_sent, status); |
472 | 472 | ||
473 | if (likely(status >= (int) req->rq_slen)) | 473 | if (likely(status >= (int) req->rq_slen)) |
@@ -488,7 +488,7 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
488 | xs_nospace(task); | 488 | xs_nospace(task); |
489 | break; | 489 | break; |
490 | default: | 490 | default: |
491 | dprintk("RPC: sendmsg returned unrecognized error %d\n", | 491 | dprintk("RPC: sendmsg returned unrecognized error %d\n", |
492 | -status); | 492 | -status); |
493 | break; | 493 | break; |
494 | } | 494 | } |
@@ -539,7 +539,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
539 | status = xs_sendpages(transport->sock, | 539 | status = xs_sendpages(transport->sock, |
540 | NULL, 0, xdr, req->rq_bytes_sent); | 540 | NULL, 0, xdr, req->rq_bytes_sent); |
541 | 541 | ||
542 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", | 542 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", |
543 | xdr->len - req->rq_bytes_sent, status); | 543 | xdr->len - req->rq_bytes_sent, status); |
544 | 544 | ||
545 | if (unlikely(status < 0)) | 545 | if (unlikely(status < 0)) |
@@ -570,7 +570,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
570 | status = -ENOTCONN; | 570 | status = -ENOTCONN; |
571 | break; | 571 | break; |
572 | default: | 572 | default: |
573 | dprintk("RPC: sendmsg returned unrecognized error %d\n", | 573 | dprintk("RPC: sendmsg returned unrecognized error %d\n", |
574 | -status); | 574 | -status); |
575 | xprt_disconnect(xprt); | 575 | xprt_disconnect(xprt); |
576 | break; | 576 | break; |
@@ -622,7 +622,7 @@ static void xs_close(struct rpc_xprt *xprt) | |||
622 | if (!sk) | 622 | if (!sk) |
623 | goto clear_close_wait; | 623 | goto clear_close_wait; |
624 | 624 | ||
625 | dprintk("RPC: xs_close xprt %p\n", xprt); | 625 | dprintk("RPC: xs_close xprt %p\n", xprt); |
626 | 626 | ||
627 | write_lock_bh(&sk->sk_callback_lock); | 627 | write_lock_bh(&sk->sk_callback_lock); |
628 | transport->inet = NULL; | 628 | transport->inet = NULL; |
@@ -652,7 +652,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
652 | { | 652 | { |
653 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 653 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
654 | 654 | ||
655 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | 655 | dprintk("RPC: xs_destroy xprt %p\n", xprt); |
656 | 656 | ||
657 | cancel_delayed_work(&transport->connect_worker); | 657 | cancel_delayed_work(&transport->connect_worker); |
658 | flush_scheduled_work(); | 658 | flush_scheduled_work(); |
@@ -686,7 +686,7 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
686 | __be32 *xp; | 686 | __be32 *xp; |
687 | 687 | ||
688 | read_lock(&sk->sk_callback_lock); | 688 | read_lock(&sk->sk_callback_lock); |
689 | dprintk("RPC: xs_udp_data_ready...\n"); | 689 | dprintk("RPC: xs_udp_data_ready...\n"); |
690 | if (!(xprt = xprt_from_sock(sk))) | 690 | if (!(xprt = xprt_from_sock(sk))) |
691 | goto out; | 691 | goto out; |
692 | 692 | ||
@@ -698,7 +698,7 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
698 | 698 | ||
699 | repsize = skb->len - sizeof(struct udphdr); | 699 | repsize = skb->len - sizeof(struct udphdr); |
700 | if (repsize < 4) { | 700 | if (repsize < 4) { |
701 | dprintk("RPC: impossible RPC reply size %d!\n", repsize); | 701 | dprintk("RPC: impossible RPC reply size %d!\n", repsize); |
702 | goto dropit; | 702 | goto dropit; |
703 | } | 703 | } |
704 | 704 | ||
@@ -762,11 +762,11 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea | |||
762 | 762 | ||
763 | /* Sanity check of the record length */ | 763 | /* Sanity check of the record length */ |
764 | if (unlikely(transport->tcp_reclen < 4)) { | 764 | if (unlikely(transport->tcp_reclen < 4)) { |
765 | dprintk("RPC: invalid TCP record fragment length\n"); | 765 | dprintk("RPC: invalid TCP record fragment length\n"); |
766 | xprt_disconnect(xprt); | 766 | xprt_disconnect(xprt); |
767 | return; | 767 | return; |
768 | } | 768 | } |
769 | dprintk("RPC: reading TCP record fragment of length %d\n", | 769 | dprintk("RPC: reading TCP record fragment of length %d\n", |
770 | transport->tcp_reclen); | 770 | transport->tcp_reclen); |
771 | } | 771 | } |
772 | 772 | ||
@@ -789,7 +789,7 @@ static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_r | |||
789 | char *p; | 789 | char *p; |
790 | 790 | ||
791 | len = sizeof(transport->tcp_xid) - transport->tcp_offset; | 791 | len = sizeof(transport->tcp_xid) - transport->tcp_offset; |
792 | dprintk("RPC: reading XID (%Zu bytes)\n", len); | 792 | dprintk("RPC: reading XID (%Zu bytes)\n", len); |
793 | p = ((char *) &transport->tcp_xid) + transport->tcp_offset; | 793 | p = ((char *) &transport->tcp_xid) + transport->tcp_offset; |
794 | used = xdr_skb_read_bits(desc, p, len); | 794 | used = xdr_skb_read_bits(desc, p, len); |
795 | transport->tcp_offset += used; | 795 | transport->tcp_offset += used; |
@@ -798,7 +798,7 @@ static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_r | |||
798 | transport->tcp_flags &= ~TCP_RCV_COPY_XID; | 798 | transport->tcp_flags &= ~TCP_RCV_COPY_XID; |
799 | transport->tcp_flags |= TCP_RCV_COPY_DATA; | 799 | transport->tcp_flags |= TCP_RCV_COPY_DATA; |
800 | transport->tcp_copied = 4; | 800 | transport->tcp_copied = 4; |
801 | dprintk("RPC: reading reply for XID %08x\n", | 801 | dprintk("RPC: reading reply for XID %08x\n", |
802 | ntohl(transport->tcp_xid)); | 802 | ntohl(transport->tcp_xid)); |
803 | xs_tcp_check_fraghdr(transport); | 803 | xs_tcp_check_fraghdr(transport); |
804 | } | 804 | } |
@@ -816,7 +816,7 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea | |||
816 | req = xprt_lookup_rqst(xprt, transport->tcp_xid); | 816 | req = xprt_lookup_rqst(xprt, transport->tcp_xid); |
817 | if (!req) { | 817 | if (!req) { |
818 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; | 818 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
819 | dprintk("RPC: XID %08x request not found!\n", | 819 | dprintk("RPC: XID %08x request not found!\n", |
820 | ntohl(transport->tcp_xid)); | 820 | ntohl(transport->tcp_xid)); |
821 | spin_unlock(&xprt->transport_lock); | 821 | spin_unlock(&xprt->transport_lock); |
822 | return; | 822 | return; |
@@ -853,19 +853,20 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea | |||
853 | * be discarded. | 853 | * be discarded. |
854 | */ | 854 | */ |
855 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; | 855 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
856 | dprintk("RPC: XID %08x truncated request\n", | 856 | dprintk("RPC: XID %08x truncated request\n", |
857 | ntohl(transport->tcp_xid)); | 857 | ntohl(transport->tcp_xid)); |
858 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | 858 | dprintk("RPC: xprt = %p, tcp_copied = %lu, " |
859 | xprt, transport->tcp_copied, transport->tcp_offset, | 859 | "tcp_offset = %u, tcp_reclen = %u\n", |
860 | transport->tcp_reclen); | 860 | xprt, transport->tcp_copied, |
861 | transport->tcp_offset, transport->tcp_reclen); | ||
861 | goto out; | 862 | goto out; |
862 | } | 863 | } |
863 | 864 | ||
864 | dprintk("RPC: XID %08x read %Zd bytes\n", | 865 | dprintk("RPC: XID %08x read %Zd bytes\n", |
865 | ntohl(transport->tcp_xid), r); | 866 | ntohl(transport->tcp_xid), r); |
866 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | 867 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, " |
867 | xprt, transport->tcp_copied, transport->tcp_offset, | 868 | "tcp_reclen = %u\n", xprt, transport->tcp_copied, |
868 | transport->tcp_reclen); | 869 | transport->tcp_offset, transport->tcp_reclen); |
869 | 870 | ||
870 | if (transport->tcp_copied == req->rq_private_buf.buflen) | 871 | if (transport->tcp_copied == req->rq_private_buf.buflen) |
871 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; | 872 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
@@ -891,7 +892,7 @@ static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_s | |||
891 | desc->count -= len; | 892 | desc->count -= len; |
892 | desc->offset += len; | 893 | desc->offset += len; |
893 | transport->tcp_offset += len; | 894 | transport->tcp_offset += len; |
894 | dprintk("RPC: discarded %Zu bytes\n", len); | 895 | dprintk("RPC: discarded %Zu bytes\n", len); |
895 | xs_tcp_check_fraghdr(transport); | 896 | xs_tcp_check_fraghdr(transport); |
896 | } | 897 | } |
897 | 898 | ||
@@ -905,7 +906,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns | |||
905 | .count = len, | 906 | .count = len, |
906 | }; | 907 | }; |
907 | 908 | ||
908 | dprintk("RPC: xs_tcp_data_recv started\n"); | 909 | dprintk("RPC: xs_tcp_data_recv started\n"); |
909 | do { | 910 | do { |
910 | /* Read in a new fragment marker if necessary */ | 911 | /* Read in a new fragment marker if necessary */ |
911 | /* Can we ever really expect to get completely empty fragments? */ | 912 | /* Can we ever really expect to get completely empty fragments? */ |
@@ -926,7 +927,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns | |||
926 | /* Skip over any trailing bytes on short reads */ | 927 | /* Skip over any trailing bytes on short reads */ |
927 | xs_tcp_read_discard(transport, &desc); | 928 | xs_tcp_read_discard(transport, &desc); |
928 | } while (desc.count); | 929 | } while (desc.count); |
929 | dprintk("RPC: xs_tcp_data_recv done\n"); | 930 | dprintk("RPC: xs_tcp_data_recv done\n"); |
930 | return len - desc.count; | 931 | return len - desc.count; |
931 | } | 932 | } |
932 | 933 | ||
@@ -941,8 +942,9 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes) | |||
941 | struct rpc_xprt *xprt; | 942 | struct rpc_xprt *xprt; |
942 | read_descriptor_t rd_desc; | 943 | read_descriptor_t rd_desc; |
943 | 944 | ||
945 | dprintk("RPC: xs_tcp_data_ready...\n"); | ||
946 | |||
944 | read_lock(&sk->sk_callback_lock); | 947 | read_lock(&sk->sk_callback_lock); |
945 | dprintk("RPC: xs_tcp_data_ready...\n"); | ||
946 | if (!(xprt = xprt_from_sock(sk))) | 948 | if (!(xprt = xprt_from_sock(sk))) |
947 | goto out; | 949 | goto out; |
948 | if (xprt->shutdown) | 950 | if (xprt->shutdown) |
@@ -968,11 +970,11 @@ static void xs_tcp_state_change(struct sock *sk) | |||
968 | read_lock(&sk->sk_callback_lock); | 970 | read_lock(&sk->sk_callback_lock); |
969 | if (!(xprt = xprt_from_sock(sk))) | 971 | if (!(xprt = xprt_from_sock(sk))) |
970 | goto out; | 972 | goto out; |
971 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); | 973 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); |
972 | dprintk("RPC: state %x conn %d dead %d zapped %d\n", | 974 | dprintk("RPC: state %x conn %d dead %d zapped %d\n", |
973 | sk->sk_state, xprt_connected(xprt), | 975 | sk->sk_state, xprt_connected(xprt), |
974 | sock_flag(sk, SOCK_DEAD), | 976 | sock_flag(sk, SOCK_DEAD), |
975 | sock_flag(sk, SOCK_ZAPPED)); | 977 | sock_flag(sk, SOCK_ZAPPED)); |
976 | 978 | ||
977 | switch (sk->sk_state) { | 979 | switch (sk->sk_state) { |
978 | case TCP_ESTABLISHED: | 980 | case TCP_ESTABLISHED: |
@@ -1140,7 +1142,7 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | |||
1140 | { | 1142 | { |
1141 | struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr; | 1143 | struct sockaddr_in *sap = (struct sockaddr_in *) &xprt->addr; |
1142 | 1144 | ||
1143 | dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); | 1145 | dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); |
1144 | 1146 | ||
1145 | sap->sin_port = htons(port); | 1147 | sap->sin_port = htons(port); |
1146 | } | 1148 | } |
@@ -1159,7 +1161,7 @@ static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) | |||
1159 | sizeof(myaddr)); | 1161 | sizeof(myaddr)); |
1160 | if (err == 0) { | 1162 | if (err == 0) { |
1161 | transport->port = port; | 1163 | transport->port = port; |
1162 | dprintk("RPC: xs_bindresvport bound to port %u\n", | 1164 | dprintk("RPC: xs_bindresvport bound to port %u\n", |
1163 | port); | 1165 | port); |
1164 | return 0; | 1166 | return 0; |
1165 | } | 1167 | } |
@@ -1169,7 +1171,7 @@ static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) | |||
1169 | port--; | 1171 | port--; |
1170 | } while (err == -EADDRINUSE && port != transport->port); | 1172 | } while (err == -EADDRINUSE && port != transport->port); |
1171 | 1173 | ||
1172 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); | 1174 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); |
1173 | return err; | 1175 | return err; |
1174 | } | 1176 | } |
1175 | 1177 | ||
@@ -1223,7 +1225,7 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1223 | xs_close(xprt); | 1225 | xs_close(xprt); |
1224 | 1226 | ||
1225 | if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { | 1227 | if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { |
1226 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); | 1228 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); |
1227 | goto out; | 1229 | goto out; |
1228 | } | 1230 | } |
1229 | xs_reclassify_socket(sock); | 1231 | xs_reclassify_socket(sock); |
@@ -1233,7 +1235,7 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1233 | goto out; | 1235 | goto out; |
1234 | } | 1236 | } |
1235 | 1237 | ||
1236 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | 1238 | dprintk("RPC: worker connecting xprt %p to address: %s\n", |
1237 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); | 1239 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); |
1238 | 1240 | ||
1239 | if (!transport->inet) { | 1241 | if (!transport->inet) { |
@@ -1275,7 +1277,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1275 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 1277 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
1276 | struct sockaddr any; | 1278 | struct sockaddr any; |
1277 | 1279 | ||
1278 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); | 1280 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); |
1279 | 1281 | ||
1280 | /* | 1282 | /* |
1281 | * Disconnect the transport socket by doing a connect operation | 1283 | * Disconnect the transport socket by doing a connect operation |
@@ -1285,7 +1287,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1285 | any.sa_family = AF_UNSPEC; | 1287 | any.sa_family = AF_UNSPEC; |
1286 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); | 1288 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); |
1287 | if (result) | 1289 | if (result) |
1288 | dprintk("RPC: AF_UNSPEC connect return code %d\n", | 1290 | dprintk("RPC: AF_UNSPEC connect return code %d\n", |
1289 | result); | 1291 | result); |
1290 | } | 1292 | } |
1291 | 1293 | ||
@@ -1309,7 +1311,8 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1309 | if (!sock) { | 1311 | if (!sock) { |
1310 | /* start from scratch */ | 1312 | /* start from scratch */ |
1311 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { | 1313 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { |
1312 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); | 1314 | dprintk("RPC: can't create TCP transport " |
1315 | "socket (%d).\n", -err); | ||
1313 | goto out; | 1316 | goto out; |
1314 | } | 1317 | } |
1315 | xs_reclassify_socket(sock); | 1318 | xs_reclassify_socket(sock); |
@@ -1322,7 +1325,7 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1322 | /* "close" the socket, preserving the local port */ | 1325 | /* "close" the socket, preserving the local port */ |
1323 | xs_tcp_reuse_connection(xprt); | 1326 | xs_tcp_reuse_connection(xprt); |
1324 | 1327 | ||
1325 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | 1328 | dprintk("RPC: worker connecting xprt %p to address: %s\n", |
1326 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); | 1329 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); |
1327 | 1330 | ||
1328 | if (!transport->inet) { | 1331 | if (!transport->inet) { |
@@ -1359,8 +1362,9 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1359 | xprt->stat.connect_start = jiffies; | 1362 | xprt->stat.connect_start = jiffies; |
1360 | status = kernel_connect(sock, (struct sockaddr *) &xprt->addr, | 1363 | status = kernel_connect(sock, (struct sockaddr *) &xprt->addr, |
1361 | xprt->addrlen, O_NONBLOCK); | 1364 | xprt->addrlen, O_NONBLOCK); |
1362 | dprintk("RPC: %p connect status %d connected %d sock state %d\n", | 1365 | dprintk("RPC: %p connect status %d connected %d sock state %d\n", |
1363 | xprt, -status, xprt_connected(xprt), sock->sk->sk_state); | 1366 | xprt, -status, xprt_connected(xprt), |
1367 | sock->sk->sk_state); | ||
1364 | if (status < 0) { | 1368 | if (status < 0) { |
1365 | switch (status) { | 1369 | switch (status) { |
1366 | case -EINPROGRESS: | 1370 | case -EINPROGRESS: |
@@ -1404,7 +1408,8 @@ static void xs_connect(struct rpc_task *task) | |||
1404 | return; | 1408 | return; |
1405 | 1409 | ||
1406 | if (transport->sock != NULL) { | 1410 | if (transport->sock != NULL) { |
1407 | dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", | 1411 | dprintk("RPC: xs_connect delayed xprt %p for %lu " |
1412 | "seconds\n", | ||
1408 | xprt, xprt->reestablish_timeout / HZ); | 1413 | xprt, xprt->reestablish_timeout / HZ); |
1409 | schedule_delayed_work(&transport->connect_worker, | 1414 | schedule_delayed_work(&transport->connect_worker, |
1410 | xprt->reestablish_timeout); | 1415 | xprt->reestablish_timeout); |
@@ -1412,7 +1417,7 @@ static void xs_connect(struct rpc_task *task) | |||
1412 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) | 1417 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) |
1413 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 1418 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
1414 | } else { | 1419 | } else { |
1415 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 1420 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
1416 | schedule_delayed_work(&transport->connect_worker, 0); | 1421 | schedule_delayed_work(&transport->connect_worker, 0); |
1417 | 1422 | ||
1418 | /* flush_scheduled_work can sleep... */ | 1423 | /* flush_scheduled_work can sleep... */ |
@@ -1507,13 +1512,14 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns | |||
1507 | struct sock_xprt *new; | 1512 | struct sock_xprt *new; |
1508 | 1513 | ||
1509 | if (addrlen > sizeof(xprt->addr)) { | 1514 | if (addrlen > sizeof(xprt->addr)) { |
1510 | dprintk("RPC: xs_setup_xprt: address too large\n"); | 1515 | dprintk("RPC: xs_setup_xprt: address too large\n"); |
1511 | return ERR_PTR(-EBADF); | 1516 | return ERR_PTR(-EBADF); |
1512 | } | 1517 | } |
1513 | 1518 | ||
1514 | new = kzalloc(sizeof(*new), GFP_KERNEL); | 1519 | new = kzalloc(sizeof(*new), GFP_KERNEL); |
1515 | if (new == NULL) { | 1520 | if (new == NULL) { |
1516 | dprintk("RPC: xs_setup_xprt: couldn't allocate rpc_xprt\n"); | 1521 | dprintk("RPC: xs_setup_xprt: couldn't allocate " |
1522 | "rpc_xprt\n"); | ||
1517 | return ERR_PTR(-ENOMEM); | 1523 | return ERR_PTR(-ENOMEM); |
1518 | } | 1524 | } |
1519 | xprt = &new->xprt; | 1525 | xprt = &new->xprt; |
@@ -1522,7 +1528,8 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns | |||
1522 | xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL); | 1528 | xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL); |
1523 | if (xprt->slot == NULL) { | 1529 | if (xprt->slot == NULL) { |
1524 | kfree(xprt); | 1530 | kfree(xprt); |
1525 | dprintk("RPC: xs_setup_xprt: couldn't allocate slot table\n"); | 1531 | dprintk("RPC: xs_setup_xprt: couldn't allocate slot " |
1532 | "table\n"); | ||
1526 | return ERR_PTR(-ENOMEM); | 1533 | return ERR_PTR(-ENOMEM); |
1527 | } | 1534 | } |
1528 | 1535 | ||
@@ -1572,7 +1579,7 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
1572 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); | 1579 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); |
1573 | 1580 | ||
1574 | xs_format_peer_addresses(xprt); | 1581 | xs_format_peer_addresses(xprt); |
1575 | dprintk("RPC: set up transport to address %s\n", | 1582 | dprintk("RPC: set up transport to address %s\n", |
1576 | xprt->address_strings[RPC_DISPLAY_ALL]); | 1583 | xprt->address_strings[RPC_DISPLAY_ALL]); |
1577 | 1584 | ||
1578 | return xprt; | 1585 | return xprt; |
@@ -1616,7 +1623,7 @@ struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
1616 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); | 1623 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); |
1617 | 1624 | ||
1618 | xs_format_peer_addresses(xprt); | 1625 | xs_format_peer_addresses(xprt); |
1619 | dprintk("RPC: set up transport to address %s\n", | 1626 | dprintk("RPC: set up transport to address %s\n", |
1620 | xprt->address_strings[RPC_DISPLAY_ALL]); | 1627 | xprt->address_strings[RPC_DISPLAY_ALL]); |
1621 | 1628 | ||
1622 | return xprt; | 1629 | return xprt; |
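Several of the xprtsock.c hunks above wrap long dprintk format strings across two source lines (for example the "can't create TCP transport " / "socket (%d).\n" split). A minimal userspace sketch, with printf in place of dprintk and a made-up error value, showing that adjacent string literals still produce a single log line:

#include <stdio.h>

int main(void)
{
	int err = -13;	/* hypothetical negative errno-style value */

	/* Adjacent string literals are concatenated at compile time, so the
	 * two fragments below form one format string and one output line;
	 * only the source line has been wrapped. */
	printf("RPC:       can't create TCP transport "
	       "socket (%d).\n", -err);	/* -> ...socket (13). */
	return 0;
}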