author     Nick Piggin <npiggin@kernel.dk>    2011-01-07 01:49:31 -0500
committer  Nick Piggin <npiggin@kernel.dk>    2011-01-07 01:50:20 -0500
commit     2304450783dfde7b0b94ae234edd0dbffa865073 (patch)
tree       b3435e65c24d69ccad9ef9492624f5b6081d86b8 /fs/dcache.c
parent     789680d1ee9311cdf095241dc02bd9784d799cd1 (diff)
fs: dcache scale lru
Add a new lock, dcache_lru_lock, to protect the dcache LRU list from concurrent modification. d_lru is also protected by d_lock, which allows LRU lists to be accessed without the lru lock, using RCU in future patches.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
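A condensed, caller-side sketch of the nesting this describes (illustrative only; it paraphrases the hunks below rather than quoting any single one, and assumes kernel context rather than standalone code):

/*
 * Before this patch, dcache_lock alone guarded d_lru.  Afterwards a
 * caller takes the per-dentry d_lock, and the LRU helpers take the
 * global dcache_lru_lock internally around the list/counter updates.
 */
spin_lock(&dcache_lock);        /* still required at this point in the series */
spin_lock(&dentry->d_lock);
dentry_lru_del(dentry);         /* acquires and releases dcache_lru_lock itself */
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);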
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--    fs/dcache.c    112
1 file changed, 84 insertions, 28 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 1e124d4673c6..3d3c843c36ed 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -37,11 +37,19 @@
 
 /*
  * Usage:
- * dcache_hash_lock protects dcache hash table, s_anon lists
+ * dcache_hash_lock protects:
+ *   - the dcache hash table, s_anon lists
+ * dcache_lru_lock protects:
+ *   - the dcache lru lists and counters
+ * d_lock protects:
+ *   - d_flags
+ *   - d_name
+ *   - d_lru
  *
  * Ordering:
  * dcache_lock
  *   dentry->d_lock
+ *     dcache_lru_lock
 *     dcache_hash_lock
 *
 * if (dentry1 < dentry2)
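The ordering list above reads outer to inner: any path that needs several of these locks must acquire them in that order, with the new dcache_lru_lock nesting inside d_lock. Illustrative shape only, not a line taken from the patch:

spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
spin_lock(&dcache_lru_lock);    /* new lock, innermost of the three */
/* ... manipulate dentry->d_lru and the per-sb LRU counters ... */
spin_unlock(&dcache_lru_lock);
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);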
@@ -52,6 +60,7 @@ int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_hash_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
@@ -154,28 +163,38 @@ static void dentry_iput(struct dentry * dentry)
 }
 
 /*
- * dentry_lru_(add|del|move_tail) must be called with dcache_lock held.
+ * dentry_lru_(add|del|move_tail) must be called with d_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
         if (list_empty(&dentry->d_lru)) {
+                spin_lock(&dcache_lru_lock);
                 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
                 dentry->d_sb->s_nr_dentry_unused++;
                 dentry_stat.nr_unused++;
+                spin_unlock(&dcache_lru_lock);
         }
 }
 
+static void __dentry_lru_del(struct dentry *dentry)
+{
+        list_del_init(&dentry->d_lru);
+        dentry->d_sb->s_nr_dentry_unused--;
+        dentry_stat.nr_unused--;
+}
+
 static void dentry_lru_del(struct dentry *dentry)
 {
         if (!list_empty(&dentry->d_lru)) {
-                list_del_init(&dentry->d_lru);
-                dentry->d_sb->s_nr_dentry_unused--;
-                dentry_stat.nr_unused--;
+                spin_lock(&dcache_lru_lock);
+                __dentry_lru_del(dentry);
+                spin_unlock(&dcache_lru_lock);
         }
 }
 
 static void dentry_lru_move_tail(struct dentry *dentry)
 {
+        spin_lock(&dcache_lru_lock);
         if (list_empty(&dentry->d_lru)) {
                 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
                 dentry->d_sb->s_nr_dentry_unused++;
@@ -183,6 +202,7 @@ static void dentry_lru_move_tail(struct dentry *dentry)
         } else {
                 list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
         }
+        spin_unlock(&dcache_lru_lock);
 }
 
 /**
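The new __dentry_lru_del()/dentry_lru_del() pair follows the usual kernel double-underscore convention, sketched here as a simplified illustration (not a hunk from the patch): the __ variant assumes dcache_lru_lock is already held, the plain variant takes and drops it, and callers of either hold d_lock.

/* LRU walk that already holds dcache_lru_lock (cf. shrink_dentry_list below) */
__dentry_lru_del(dentry);

/* ordinary caller: holds d_lock and lets the helper take dcache_lru_lock */
spin_lock(&dentry->d_lock);
dentry_lru_del(dentry);
spin_unlock(&dentry->d_lock);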
@@ -192,6 +212,8 @@ static void dentry_lru_move_tail(struct dentry *dentry)
  * The dentry must already be unhashed and removed from the LRU.
  *
  * If this is the root of the dentry tree, return NULL.
+ *
+ * dcache_lock and d_lock must be held by caller, are dropped by d_kill.
  */
 static struct dentry *d_kill(struct dentry *dentry)
         __releases(dentry->d_lock)
@@ -383,10 +405,19 @@ int d_invalidate(struct dentry * dentry)
 EXPORT_SYMBOL(d_invalidate);
 
 /* This should be called _only_ with dcache_lock held */
+static inline struct dentry * __dget_locked_dlock(struct dentry *dentry)
+{
+        atomic_inc(&dentry->d_count);
+        dentry_lru_del(dentry);
+        return dentry;
+}
+
 static inline struct dentry * __dget_locked(struct dentry *dentry)
 {
         atomic_inc(&dentry->d_count);
+        spin_lock(&dentry->d_lock);
         dentry_lru_del(dentry);
+        spin_unlock(&dentry->d_lock);
         return dentry;
 }
 
@@ -465,7 +496,7 @@ restart:
         list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                 spin_lock(&dentry->d_lock);
                 if (!atomic_read(&dentry->d_count)) {
-                        __dget_locked(dentry);
+                        __dget_locked_dlock(dentry);
                         __d_drop(dentry);
                         spin_unlock(&dentry->d_lock);
                         spin_unlock(&dcache_lock);
@@ -489,7 +520,6 @@ EXPORT_SYMBOL(d_prune_aliases);
 static void prune_one_dentry(struct dentry * dentry)
         __releases(dentry->d_lock)
         __releases(dcache_lock)
-        __acquires(dcache_lock)
 {
         __d_drop(dentry);
         dentry = d_kill(dentry);
@@ -498,15 +528,16 @@ static void prune_one_dentry(struct dentry * dentry)
          * Prune ancestors.  Locking is simpler than in dput(),
          * because dcache_lock needs to be taken anyway.
          */
-        spin_lock(&dcache_lock);
         while (dentry) {
-                if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
+                spin_lock(&dcache_lock);
+                if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock)) {
+                        spin_unlock(&dcache_lock);
                         return;
+                }
 
                 dentry_lru_del(dentry);
                 __d_drop(dentry);
                 dentry = d_kill(dentry);
-                spin_lock(&dcache_lock);
         }
 }
 
@@ -516,21 +547,31 @@ static void shrink_dentry_list(struct list_head *list)
 
         while (!list_empty(list)) {
                 dentry = list_entry(list->prev, struct dentry, d_lru);
-                dentry_lru_del(dentry);
+
+                if (!spin_trylock(&dentry->d_lock)) {
+                        spin_unlock(&dcache_lru_lock);
+                        cpu_relax();
+                        spin_lock(&dcache_lru_lock);
+                        continue;
+                }
+
+                __dentry_lru_del(dentry);
 
                 /*
                  * We found an inuse dentry which was not removed from
                  * the LRU because of laziness during lookup. Do not free
                  * it - just keep it off the LRU list.
                  */
-                spin_lock(&dentry->d_lock);
                 if (atomic_read(&dentry->d_count)) {
                         spin_unlock(&dentry->d_lock);
                         continue;
                 }
+                spin_unlock(&dcache_lru_lock);
+
                 prune_one_dentry(dentry);
-                /* dentry->d_lock was dropped in prune_one_dentry() */
-                cond_resched_lock(&dcache_lock);
+                /* dcache_lock and dentry->d_lock dropped */
+                spin_lock(&dcache_lock);
+                spin_lock(&dcache_lru_lock);
         }
 }
 
@@ -551,32 +592,36 @@ static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
         int cnt = *count;
 
         spin_lock(&dcache_lock);
+relock:
+        spin_lock(&dcache_lru_lock);
         while (!list_empty(&sb->s_dentry_lru)) {
                 dentry = list_entry(sb->s_dentry_lru.prev,
                                 struct dentry, d_lru);
                 BUG_ON(dentry->d_sb != sb);
 
+                if (!spin_trylock(&dentry->d_lock)) {
+                        spin_unlock(&dcache_lru_lock);
+                        cpu_relax();
+                        goto relock;
+                }
+
                 /*
                  * If we are honouring the DCACHE_REFERENCED flag and the
                  * dentry has this flag set, don't free it. Clear the flag
                  * and put it back on the LRU.
                  */
-                if (flags & DCACHE_REFERENCED) {
-                        spin_lock(&dentry->d_lock);
-                        if (dentry->d_flags & DCACHE_REFERENCED) {
-                                dentry->d_flags &= ~DCACHE_REFERENCED;
-                                list_move(&dentry->d_lru, &referenced);
-                                spin_unlock(&dentry->d_lock);
-                                cond_resched_lock(&dcache_lock);
-                                continue;
-                        }
+                if (flags & DCACHE_REFERENCED &&
+                                dentry->d_flags & DCACHE_REFERENCED) {
+                        dentry->d_flags &= ~DCACHE_REFERENCED;
+                        list_move(&dentry->d_lru, &referenced);
+                        spin_unlock(&dentry->d_lock);
+                } else {
+                        list_move_tail(&dentry->d_lru, &tmp);
                         spin_unlock(&dentry->d_lock);
+                        if (!--cnt)
+                                break;
                 }
-
-                list_move_tail(&dentry->d_lru, &tmp);
-                if (!--cnt)
-                        break;
-                cond_resched_lock(&dcache_lock);
+                /* XXX: re-add cond_resched_lock when dcache_lock goes away */
         }
 
         *count = cnt;
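The spin_trylock()/cpu_relax()/goto relock sequence in the two shrinker hunks above exists because the LRU walk already holds dcache_lru_lock when it finds a dentry, while the documented order puts d_lock before dcache_lru_lock; blocking on d_lock there could deadlock, so the walk drops the inner lock and retries. Skeleton of the idiom, illustrative only:

relock:
        spin_lock(&dcache_lru_lock);
        /* ... pick a dentry off sb->s_dentry_lru ... */
        if (!spin_trylock(&dentry->d_lock)) {
                spin_unlock(&dcache_lru_lock); /* back out of the inversion */
                cpu_relax();                   /* let the d_lock holder finish */
                goto relock;                   /* and retry from the top */
        }
        /* both locks held; trylock avoided blocking in the reverse order */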
@@ -584,6 +629,7 @@ static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
 
         if (!list_empty(&referenced))
                 list_splice(&referenced, &sb->s_dentry_lru);
+        spin_unlock(&dcache_lru_lock);
         spin_unlock(&dcache_lock);
 
 }
@@ -679,10 +725,12 @@ void shrink_dcache_sb(struct super_block *sb)
         LIST_HEAD(tmp);
 
         spin_lock(&dcache_lock);
+        spin_lock(&dcache_lru_lock);
         while (!list_empty(&sb->s_dentry_lru)) {
                 list_splice_init(&sb->s_dentry_lru, &tmp);
                 shrink_dentry_list(&tmp);
         }
+        spin_unlock(&dcache_lru_lock);
         spin_unlock(&dcache_lock);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
@@ -701,7 +749,9 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 
         /* detach this root from the system */
         spin_lock(&dcache_lock);
+        spin_lock(&dentry->d_lock);
         dentry_lru_del(dentry);
+        spin_unlock(&dentry->d_lock);
         __d_drop(dentry);
         spin_unlock(&dcache_lock);
 
@@ -715,7 +765,9 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                 spin_lock(&dcache_lock);
                 list_for_each_entry(loop, &dentry->d_subdirs,
                                     d_u.d_child) {
+                        spin_lock(&loop->d_lock);
                         dentry_lru_del(loop);
+                        spin_unlock(&loop->d_lock);
                         __d_drop(loop);
                         cond_resched_lock(&dcache_lock);
                 }
@@ -892,6 +944,8 @@ resume:
                 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
                 next = tmp->next;
 
+                spin_lock(&dentry->d_lock);
+
                 /*
                  * move only zero ref count dentries to the end
                  * of the unused list for prune_dcache
@@ -903,6 +957,8 @@ resume:
                         dentry_lru_del(dentry);
                 }
 
+                spin_unlock(&dentry->d_lock);
+
                 /*
                  * We can return to the caller if we have found some (this
                  * ensures forward progress). We'll be coming back to find