author     Al Viro <viro@zeniv.linux.org.uk>   2014-05-01 10:30:00 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>   2014-05-01 10:30:00 -0400
commit     41edf278fc2f042f4e22a12ed87d19c5201210e1
tree       4f49bfe5f401dcc7477fb630f3773a59402b634a /fs/dcache.c
parent     01b6035190b024240a43ac1d8e9c6f964f5f1c63
dentry_kill(): don't try to remove from shrink list
If the victim is on the shrink list, don't remove it from there.
If shrink_dentry_list() manages to remove it from the list before
we are done - fine, we'll just free it as usual. If not - mark
it with a new flag (DCACHE_MAY_FREE) and leave it there.
Eventually, shrink_dentry_list() will get to it, remove the sucker
from the shrink list and call dentry_kill(dentry, 0), which is where
we'll deal with freeing.
Since dentry_kill(dentry, 0) may now happen after or during
dentry_kill(dentry, 1), we need to recognize that case (by seeing
DCACHE_DENTRY_KILLED already set), unlock everything
and either free the sucker (if DCACHE_MAY_FREE has been
set) or leave it for the ongoing dentry_kill(dentry, 1) to deal with.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
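
The handoff described above can be modeled outside the kernel. Below is a minimal
user-space sketch of the same idea, assuming a plain pthread mutex in place of
d_lock; the struct, flag values and function names (fake_dentry, kill_path,
shrink_path) are invented for illustration and are not the kernel's own.
Whichever path finds that the other has already done the teardown is the one
allowed to free, decided by the KILLED/MAY_FREE pair under the per-object lock.

/*
 * Minimal user-space model of the DCACHE_DENTRY_KILLED / DCACHE_MAY_FREE
 * handoff described in the commit message.  All names and flag values
 * here are illustrative, not the kernel's.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define KILLED		0x1	/* stands in for DCACHE_DENTRY_KILLED */
#define MAY_FREE	0x2	/* stands in for DCACHE_MAY_FREE */
#define ON_SHRINK_LIST	0x4	/* stands in for DCACHE_SHRINK_LIST */

struct fake_dentry {
	pthread_mutex_t lock;		/* plays the role of d_lock */
	unsigned int flags;
};

/* The refcount-drop path, i.e. dentry_kill(dentry, 1) in the commit. */
static void kill_path(struct fake_dentry *d)
{
	bool can_free = true;

	pthread_mutex_lock(&d->lock);
	d->flags |= KILLED;		/* teardown done, mark it */
	if (d->flags & ON_SHRINK_LIST) {
		/* still owned by a shrink list: hand freeing over */
		d->flags |= MAY_FREE;
		can_free = false;
	}
	pthread_mutex_unlock(&d->lock);

	if (can_free)
		free(d);
}

/* shrink_dentry_list() pulling the entry off its list later. */
static void shrink_path(struct fake_dentry *d)
{
	bool can_free;

	pthread_mutex_lock(&d->lock);
	d->flags &= ~ON_SHRINK_LIST;	/* no longer on the shrink list */
	if (d->flags & KILLED) {
		/* the other path already tore it down; free only if told to */
		can_free = d->flags & MAY_FREE;
		pthread_mutex_unlock(&d->lock);
		if (can_free)
			free(d);
		return;
	}
	pthread_mutex_unlock(&d->lock);
	/*
	 * Otherwise this path would go on to do the teardown itself,
	 * as dentry_kill(dentry, 0) does in the real code.
	 */
}

In the real code both paths also juggle the inode's i_lock, the parent's d_lock
and the LRU/shrink list locks; only the flag handoff is modeled here.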
Diffstat (limited to 'fs/dcache.c')
 -rw-r--r--  fs/dcache.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index e482775343a0..58e26bee7ef4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -468,7 +468,14 @@ dentry_kill(struct dentry *dentry, int unlock_on_failure)
 	__releases(dentry->d_lock)
 {
 	struct inode *inode;
-	struct dentry *parent;
+	struct dentry *parent = NULL;
+	bool can_free = true;
+
+	if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
+		can_free = dentry->d_flags & DCACHE_MAY_FREE;
+		spin_unlock(&dentry->d_lock);
+		goto out;
+	}
 
 	inode = dentry->d_inode;
 	if (inode && !spin_trylock(&inode->i_lock)) {
@@ -479,9 +486,7 @@ relock:
 		}
 		return dentry; /* try again with same dentry */
 	}
-	if (IS_ROOT(dentry))
-		parent = NULL;
-	else
+	if (!IS_ROOT(dentry))
 		parent = dentry->d_parent;
 	if (parent && !spin_trylock(&parent->d_lock)) {
 		if (inode)
@@ -504,8 +509,6 @@ relock:
 	if (dentry->d_flags & DCACHE_LRU_LIST) {
 		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
 			d_lru_del(dentry);
-		else
-			d_shrink_del(dentry);
 	}
 	/* if it was on the hash then remove it */
 	__d_drop(dentry);
@@ -527,7 +530,15 @@ relock:
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
 
-	dentry_free(dentry);
+	spin_lock(&dentry->d_lock);
+	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+		dentry->d_flags |= DCACHE_MAY_FREE;
+		can_free = false;
+	}
+	spin_unlock(&dentry->d_lock);
+out:
+	if (likely(can_free))
+		dentry_free(dentry);
 	return parent;
 }
 
@@ -829,7 +840,7 @@ static void shrink_dentry_list(struct list_head *list)
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup. Do not free it.
 		 */
-		if (dentry->d_lockref.count) {
+		if ((int)dentry->d_lockref.count > 0) {
 			spin_unlock(&dentry->d_lock);
 			continue;
 		}