aboutsummaryrefslogtreecommitdiffstats
path: root/fs/dcache.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/dcache.c')
-rw-r--r-- fs/dcache.c | 109
1 file changed, 62 insertions(+), 47 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 940d188e5d14..313b54b2b8f2 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -359,12 +359,13 @@ restart:
359} 359}
360 360
361/* 361/*
362 * Throw away a dentry - free the inode, dput the parent. 362 * Throw away a dentry - free the inode, dput the parent. This requires that
363 * This requires that the LRU list has already been 363 * the LRU list has already been removed.
364 * removed. 364 *
365 * Called with dcache_lock, drops it and then regains. 365 * Called with dcache_lock, drops it and then regains.
366 * Called with dentry->d_lock held, drops it.
366 */ 367 */
367static inline void prune_one_dentry(struct dentry * dentry) 368static void prune_one_dentry(struct dentry * dentry)
368{ 369{
369 struct dentry * parent; 370 struct dentry * parent;
370 371
@@ -382,6 +383,8 @@ static inline void prune_one_dentry(struct dentry * dentry)
382/** 383/**
383 * prune_dcache - shrink the dcache 384 * prune_dcache - shrink the dcache
384 * @count: number of entries to try and free 385 * @count: number of entries to try and free
386 * @sb: if given, ignore dentries for other superblocks
387 * which are being unmounted.
385 * 388 *
386 * Shrink the dcache. This is done when we need 389 * Shrink the dcache. This is done when we need
387 * more memory, or simply when we need to unmount 390 * more memory, or simply when we need to unmount
@@ -392,16 +395,29 @@ static inline void prune_one_dentry(struct dentry * dentry)
392 * all the dentries are in use. 395 * all the dentries are in use.
393 */ 396 */
394 397
395static void prune_dcache(int count) 398static void prune_dcache(int count, struct super_block *sb)
396{ 399{
397 spin_lock(&dcache_lock); 400 spin_lock(&dcache_lock);
398 for (; count ; count--) { 401 for (; count ; count--) {
399 struct dentry *dentry; 402 struct dentry *dentry;
400 struct list_head *tmp; 403 struct list_head *tmp;
404 struct rw_semaphore *s_umount;
401 405
402 cond_resched_lock(&dcache_lock); 406 cond_resched_lock(&dcache_lock);
403 407
404 tmp = dentry_unused.prev; 408 tmp = dentry_unused.prev;
409 if (unlikely(sb)) {
410 /* Try to find a dentry for this sb, but don't try
411 * too hard, if they aren't near the tail they will
412 * be moved down again soon
413 */
414 int skip = count;
415 while (skip && tmp != &dentry_unused &&
416 list_entry(tmp, struct dentry, d_lru)->d_sb != sb) {
417 skip--;
418 tmp = tmp->prev;
419 }
420 }
405 if (tmp == &dentry_unused) 421 if (tmp == &dentry_unused)
406 break; 422 break;
407 list_del_init(tmp); 423 list_del_init(tmp);
@@ -427,7 +443,45 @@ static void prune_dcache(int count)
427 spin_unlock(&dentry->d_lock); 443 spin_unlock(&dentry->d_lock);
428 continue; 444 continue;
429 } 445 }
430 prune_one_dentry(dentry); 446 /*
447 * If the dentry is not DCACHE_REFERENCED, it is time
448 * to remove it from the dcache, provided the super block is
449 * NULL (which means we are trying to reclaim memory)
450 * or this dentry belongs to the same super block that
451 * we want to shrink.
452 */
453 /*
454 * If this dentry is for "my" filesystem, then I can prune it
455 * without taking the s_umount lock (I already hold it).
456 */
457 if (sb && dentry->d_sb == sb) {
458 prune_one_dentry(dentry);
459 continue;
460 }
461 /*
462 * ...otherwise we need to be sure this filesystem isn't being
463 * unmounted, otherwise we could race with
464 * generic_shutdown_super(), and end up holding a reference to
465 * an inode while the filesystem is unmounted.
466 * So we try to get s_umount, and make sure s_root isn't NULL.
467 * (Take a local copy of s_umount to avoid a use-after-free of
468 * `dentry').
469 */
470 s_umount = &dentry->d_sb->s_umount;
471 if (down_read_trylock(s_umount)) {
472 if (dentry->d_sb->s_root != NULL) {
473 prune_one_dentry(dentry);
474 up_read(s_umount);
475 continue;
476 }
477 up_read(s_umount);
478 }
479 spin_unlock(&dentry->d_lock);
480 /* Cannot remove the first dentry, and it isn't appropriate
481 * to move it to the head of the list, so give up, and try
482 * later
483 */
484 break;
431 } 485 }
432 spin_unlock(&dcache_lock); 486 spin_unlock(&dcache_lock);
433} 487}
@@ -630,46 +684,7 @@ void shrink_dcache_parent(struct dentry * parent)
630 int found; 684 int found;
631 685
632 while ((found = select_parent(parent)) != 0) 686 while ((found = select_parent(parent)) != 0)
633 prune_dcache(found); 687 prune_dcache(found, parent->d_sb);
634}
635
636/**
637 * shrink_dcache_anon - further prune the cache
638 * @head: head of d_hash list of dentries to prune
639 *
640 * Prune the dentries that are anonymous
641 *
642 * Parsing of the d_hash list does not use hlist_for_each_entry_rcu(), as it is
643 * done under dcache_lock.
644 *
645 */
646void shrink_dcache_anon(struct hlist_head *head)
647{
648 struct hlist_node *lp;
649 int found;
650 do {
651 found = 0;
652 spin_lock(&dcache_lock);
653 hlist_for_each(lp, head) {
654 struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
655 if (!list_empty(&this->d_lru)) {
656 dentry_stat.nr_unused--;
657 list_del_init(&this->d_lru);
658 }
659
660 /*
661 * move only zero ref count dentries to the end
662 * of the unused list for prune_dcache
663 */
664 if (!atomic_read(&this->d_count)) {
665 list_add_tail(&this->d_lru, &dentry_unused);
666 dentry_stat.nr_unused++;
667 found++;
668 }
669 }
670 spin_unlock(&dcache_lock);
671 prune_dcache(found);
672 } while(found);
673} 688}
674 689
675/* 690/*
@@ -689,7 +704,7 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
689 if (nr) { 704 if (nr) {
690 if (!(gfp_mask & __GFP_FS)) 705 if (!(gfp_mask & __GFP_FS))
691 return -1; 706 return -1;
692 prune_dcache(nr); 707 prune_dcache(nr, NULL);
693 } 708 }
694 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; 709 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
695} 710}