author     Greg Banks <gnb@sgi.com>                    2009-01-13 05:26:36 -0500
committer  J. Bruce Fields <bfields@citi.umich.edu>    2009-03-18 17:38:42 -0400
commit     03cf6c9f49a8fea953d38648d016e3f46e814991 (patch)
tree       c7e639bd9fb775af1919d2442d0bf2306dfbfca7 /net/sunrpc
parent     59a252ff8c0f2fa32c896f69d56ae33e641ce7ad (diff)
knfsd: add file to export stats about nfsd pools
Add /proc/fs/nfsd/pool_stats to export to userspace various
statistics about the operation of rpc server thread pools.
This patch is based on a forward-ported version of
knfsd-add-pool-thread-stats which has been shipping in the SGI
"Enhanced NFS" product since 2006 and which was previously
posted:
http://article.gmane.org/gmane.linux.nfs/10375
It has also been updated thus:
* moved EXPORT_SYMBOL() to near the function it exports
* made the new struct seq_operations const
* used SEQ_START_TOKEN instead of ((void *)1)
* merged fix from SGI PV 990526 "sunrpc: use dprintk instead of
printk in svc_pool_stats_*()" by Harshula Jayasuriya.
* merged fix from SGI PV 964001 "Crash reading pool_stats before
nfsds are started".
Signed-off-by: Greg Banks <gnb@sgi.com>
Signed-off-by: Harshula Jayasuriya <harshula@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
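
For reference, svc_pool_stats_show() below prints one header line followed by
one line of counters per pool, so reading the new file on a hypothetical
two-pool server would look roughly like this (the counter values are purely
illustrative):

    # cat /proc/fs/nfsd/pool_stats
    # pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout
    0 63372 147 63225 11 28
    1 59340 132 59208 6 31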
Diffstat (limited to 'net/sunrpc')
 -rw-r--r--  net/sunrpc/svc_xprt.c | 100
 1 file changed, 99 insertions(+), 1 deletion(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 0551b6b6cf8c..1e66f2491460 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -318,6 +318,8 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		goto out_unlock;
 	}
 
+	pool->sp_stats.packets++;
+
 	/* Mark transport as busy. It will remain in this state until
 	 * the provider calls svc_xprt_received. We update XPT_BUSY
 	 * atomically because it also guards against trying to enqueue
@@ -355,6 +357,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	if (pool->sp_nwaking >= SVC_MAX_WAKING) {
 		/* too many threads are runnable and trying to wake up */
 		thread_avail = 0;
+		pool->sp_stats.overloads_avoided++;
 	}
 
 	if (thread_avail) {
@@ -374,11 +377,13 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 		rqstp->rq_waking = 1;
 		pool->sp_nwaking++;
+		pool->sp_stats.threads_woken++;
 		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
+		pool->sp_stats.sockets_queued++;
 		BUG_ON(xprt->xpt_pool != pool);
 	}
 
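The sp_stats counters bumped in these hunks live in a new per-pool stats
structure. Its definition sits in include/linux/sunrpc/svc.h and so is
outside this diffstat (limited to 'net/sunrpc'), but judging from the fields
and the %lu format used below, it is roughly:

	/* Sketch of the per-pool counters, inferred from their use here;
	 * the real definition lives in include/linux/sunrpc/svc.h. */
	struct svc_pool_stats {
		unsigned long	packets;		/* requests arriving for this pool */
		unsigned long	sockets_queued;		/* no idle thread; transport queued */
		unsigned long	threads_woken;		/* idle thread handed a transport */
		unsigned long	overloads_avoided;	/* wakeup skipped: too many runnable */
		unsigned long	threads_timedout;	/* svc_recv() waits that expired */
	};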
@@ -591,6 +596,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	int pages;
 	struct xdr_buf *arg;
 	DECLARE_WAITQUEUE(wait, current);
+	long time_left;
 
 	dprintk("svc: server %p waiting for data (to = %ld)\n",
 		rqstp, timeout);
@@ -676,12 +682,14 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	add_wait_queue(&rqstp->rq_wait, &wait);
 	spin_unlock_bh(&pool->sp_lock);
 
-	schedule_timeout(timeout);
+	time_left = schedule_timeout(timeout);
 
 	try_to_freeze();
 
 	spin_lock_bh(&pool->sp_lock);
 	remove_wait_queue(&rqstp->rq_wait, &wait);
+	if (!time_left)
+		pool->sp_stats.threads_timedout++;
 
 	xprt = rqstp->rq_xprt;
 	if (!xprt) {
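The timed-out accounting works because schedule_timeout() returns the number
of jiffies remaining when the task is woken early, and 0 once the full
interval has elapsed, so a zero return means the thread timed out rather than
being handed a transport by svc_xprt_enqueue(). A minimal sketch of the
idiom, with note_timeout() as a hypothetical stand-in for the stats bump:

	long time_left = schedule_timeout(timeout);	/* sleeps at most 'timeout' jiffies */
	if (!time_left)
		note_timeout();	/* 0 => interval expired; nobody called wake_up() */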
@@ -1114,3 +1122,93 @@ int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
 	return totlen;
 }
 EXPORT_SYMBOL_GPL(svc_xprt_names);
+
+
+/*----------------------------------------------------------------------------*/
+
+static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
+{
+	unsigned int pidx = (unsigned int)*pos;
+	struct svc_serv *serv = m->private;
+
+	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
+
+	lock_kernel();
+	/* bump up the pseudo refcount while traversing */
+	svc_get(serv);
+	unlock_kernel();
+
+	if (!pidx)
+		return SEQ_START_TOKEN;
+	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
+}
+
+static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct svc_pool *pool = p;
+	struct svc_serv *serv = m->private;
+
+	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
+
+	if (p == SEQ_START_TOKEN) {
+		pool = &serv->sv_pools[0];
+	} else {
+		unsigned int pidx = (pool - &serv->sv_pools[0]);
+		if (pidx < serv->sv_nrpools-1)
+			pool = &serv->sv_pools[pidx+1];
+		else
+			pool = NULL;
+	}
+	++*pos;
+	return pool;
+}
+
+static void svc_pool_stats_stop(struct seq_file *m, void *p)
+{
+	struct svc_serv *serv = m->private;
+
+	lock_kernel();
+	/* this function really, really should have been called svc_put() */
+	svc_destroy(serv);
+	unlock_kernel();
+}
+
+static int svc_pool_stats_show(struct seq_file *m, void *p)
+{
+	struct svc_pool *pool = p;
+
+	if (p == SEQ_START_TOKEN) {
+		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
+		return 0;
+	}
+
+	seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
+		pool->sp_id,
+		pool->sp_stats.packets,
+		pool->sp_stats.sockets_queued,
+		pool->sp_stats.threads_woken,
+		pool->sp_stats.overloads_avoided,
+		pool->sp_stats.threads_timedout);
+
+	return 0;
+}
+
+static const struct seq_operations svc_pool_stats_seq_ops = {
+	.start	= svc_pool_stats_start,
+	.next	= svc_pool_stats_next,
+	.stop	= svc_pool_stats_stop,
+	.show	= svc_pool_stats_show,
+};
+
+int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
+{
+	int err;
+
+	err = seq_open(file, &svc_pool_stats_seq_ops);
+	if (!err)
+		((struct seq_file *) file->private_data)->private = serv;
+	return err;
+}
+EXPORT_SYMBOL(svc_pool_stats_open);
+
+/*----------------------------------------------------------------------------*/
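
svc_pool_stats_open() is intended to be called from a filesystem-specific
->open() method; the nfsd side that actually registers
/proc/fs/nfsd/pool_stats is outside this diffstat. Under that assumption, the
caller's wiring would look roughly like the following sketch, where nfsd_serv
stands for nfsd's global svc_serv and the stock seq_file helpers do the rest:

	/* Sketch of the caller side, not part of this diffstat. */
	static int nfsd_pool_stats_open(struct inode *inode, struct file *file)
	{
		/* assumes nfsd_serv is the global svc_serv for the nfs server */
		return svc_pool_stats_open(nfsd_serv, file);
	}

	static const struct file_operations pool_stats_operations = {
		.open		= nfsd_pool_stats_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release,
		.owner		= THIS_MODULE,
	};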