author	Al Viro <viro@zeniv.linux.org.uk>	2013-10-20 08:44:39 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-10-20 08:44:39 -0400
commit	c7314d74fcb089b127ef5753b5263ac8473f33bc (patch)
tree	0ca6b0cc8afcc0145d5eeaed90469470e60cf173 /fs
parent	43ae9e3fc70ca0057ae0a24ef5eedff05e3fae06 (diff)
nfsd regression since delayed fput()
Background: nfsd v[23] had a throughput regression since delayed fput went in; every read or write ends up doing fput() and we get a pair of extra context switches out of that (plus quite a bit of work in queue_work itself, apparently). Use of schedule_delayed_work() gives it a chance to accumulate a bit before we do __fput() on all of them. I'm not too happy about that solution, but... on at least one real-world setup it reverts about 10% of the throughput loss we got from the switch to delayed fput.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
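For illustration only, not part of the patch: below is a minimal userspace C sketch of the batching idea, with hypothetical names (struct item, put_item(), drain()). Producers push onto a lock-free list, and only the push that finds the list empty schedules a slightly delayed drain, so many deferred items get cleaned up in one pass. That is the effect schedule_delayed_work(&delayed_fput_work, 1) has on delayed_fput_list in the hunk further down.

/* build: cc -pthread batch_demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct item {
	struct item *next;
	int id;
};

static _Atomic(struct item *) pending;	/* lock-free stack of deferred items */
static pthread_t drainer;

/* One drain handles everything that accumulated while it was "delayed". */
static void *drain(void *arg)
{
	(void)arg;
	usleep(1000);	/* the small delay that lets items pile up */
	struct item *batch = atomic_exchange(&pending, (struct item *)NULL);
	int n = 0;
	while (batch) {
		struct item *next = batch->next;
		free(batch);	/* the real code does the expensive __fput() here */
		n++;
		batch = next;
	}
	printf("drained %d items in one batch\n", n);
	return NULL;
}

/* Producer side: push, and schedule one drain when the list was empty. */
static void put_item(int id)
{
	static int drain_scheduled;	/* main() is the only producer in this demo */
	struct item *it = malloc(sizeof(*it));

	it->id = id;
	it->next = atomic_load(&pending);
	while (!atomic_compare_exchange_weak(&pending, &it->next, it))
		;	/* retry; on failure it->next was updated to the new head */
	if (it->next == NULL && !drain_scheduled) {
		drain_scheduled = 1;
		pthread_create(&drainer, NULL, drain, NULL);
	}
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		put_item(i);	/* all of these land in a single drain pass */
	pthread_join(drainer, NULL);
	return 0;
}

A push that races past drain()'s atomic_exchange would be missed in this sketch; in the kernel, llist_add() returning true on the now-empty list simply schedules the work again, so nothing is lost.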
Diffstat (limited to 'fs')
-rw-r--r--	fs/file_table.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/file_table.c b/fs/file_table.c
index abdd15ad13c9..e900ca518635 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -297,7 +297,7 @@ void flush_delayed_fput(void)
 	delayed_fput(NULL);
 }
 
-static DECLARE_WORK(delayed_fput_work, delayed_fput);
+static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
 
 void fput(struct file *file)
 {
@@ -317,7 +317,7 @@ void fput(struct file *file)
 	}
 
 	if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
-		schedule_work(&delayed_fput_work);
+		schedule_delayed_work(&delayed_fput_work, 1);
 	}
 }
 