author    Nick Piggin <npiggin@kernel.dk>  2011-01-07 01:49:49 -0500
committer Nick Piggin <npiggin@kernel.dk>  2011-01-07 01:50:26 -0500
commit    fa0d7e3de6d6fc5004ad9dea0dd6b286af8f03e9
tree      203e0f73883e4c26b5597e36042386a1237dab35 /mm
parent    77812a1ef139d84270d27faacc0630c887411013
fs: icache RCU free inodes
RCU free the struct inode. This will allow:
- Subsequent store-free path walking patch. The inode must be consulted for
permissions when walking, so an RCU inode reference is a must (see the sketch
after this list).
- sb_inode_list_lock to be moved inside i_lock because sb list walkers who want
to take i_lock no longer need to take sb_inode_list_lock to walk the list in
the first place. This will simplify and optimize locking.
- Could remove some nested trylock loops in dcache code.
- Could potentially simplify things a bit in VM land: we would no longer need
to take the page lock to follow page->mapping.
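As an illustration of the reader side this enables (a minimal sketch, not part
of this patch: walk_check_perm() and the toy permission test are assumptions),
once inodes are freed only after an RCU grace period, a walker may dereference
dentry->d_inode inside an RCU read-side critical section without taking any
reference or lock:

#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>

/* Hypothetical helper, not from this patch: consult an inode for a
 * (toy) permission check without taking a reference. */
static int walk_check_perm(struct dentry *dentry)
{
	struct inode *inode;
	int err = -ECHILD;	/* "fall back to refcounted walk" */

	rcu_read_lock();
	/* Safe to dereference: with RCU-freed inodes, this object
	 * cannot be freed until after rcu_read_unlock(). */
	inode = ACCESS_ONCE(dentry->d_inode);
	if (inode)
		err = (inode->i_mode & S_IXOTH) ? 0 : -EACCES;
	rcu_read_unlock();
	return err;
}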
The downside of this is the performance cost of using RCU. In a simple
creat/unlink microbenchmark, performance drops by about 10% due to the
inability to reuse cache-hot slab objects. As iterations increase and RCU
freeing starts kicking in, this grows to about 20%.
In cases where inode lifetimes are longer (i.e. many inodes may be allocated
during the average lifespan of a single inode), a lot of this cache reuse is
not applicable, so the regression caused by this patch is smaller.
The cache-hot regression could largely be avoided by using SLAB_DESTROY_BY_RCU;
however, this adds some complexity to list walking and store-free path walking,
so I prefer to implement it at a later date, if it is shown to be a win in real
situations. I haven't found a regression in any non-micro benchmark, so I doubt
it will be a problem.
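For context, a hedged sketch of that deferred alternative (identifiers here
are illustrative, not from this patch): under SLAB_DESTROY_BY_RCU, slab pages
survive a grace period but individual objects may be recycled immediately, so
a lock-free lookup must lock the object and revalidate its identity after
finding it:

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static struct kmem_cache *my_inode_cachep;	/* illustrative name */

static void my_cache_init(void)
{
	/* Pages backing this cache are RCU freed; objects are not. */
	my_inode_cachep = kmem_cache_create("my_inode_cache",
					    sizeof(struct inode), 0,
					    SLAB_DESTROY_BY_RCU, NULL);
}

/* The object found under rcu_read_lock() may have been freed and
 * reused for a different inode; lock it and recheck identity. */
static bool inode_still_matches(struct inode *inode, unsigned long want_ino)
{
	bool ok;

	spin_lock(&inode->i_lock);
	ok = (inode->i_ino == want_ino);
	spin_unlock(&inode->i_lock);
	return ok;
}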
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Diffstat (limited to 'mm')
-rw-r--r--	mm/shmem.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 47fdeeb9d636..5ee67c990602 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2415,13 +2415,20 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
 	return &p->vfs_inode;
 }
 
+static void shmem_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
+}
+
 static void shmem_destroy_inode(struct inode *inode)
 {
 	if ((inode->i_mode & S_IFMT) == S_IFREG) {
 		/* only struct inode is valid if it's an inline symlink */
 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
 	}
-	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
+	call_rcu(&inode->i_rcu, shmem_i_callback);
 }
 
 static void init_once(void *foo)
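A note on the INIT_LIST_HEAD above: in the full patch (outside this
'mm'-limited diffstat), i_rcu is added to struct inode in a union with
i_dentry, so queueing the rcu_head clobbers the list head; the callback
re-initializes i_dentry so the object returns to the slab in the constructed
state that init_once established. Roughly the relevant declaration (elided
fields marked):

struct inode {
	/* ... */
	union {
		struct list_head	i_dentry;
		struct rcu_head		i_rcu;	/* shares storage with i_dentry */
	};
	/* ... */
};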