Diffstat (limited to 'fs/dcache.c')
 -rw-r--r--   fs/dcache.c   277
 1 file changed, 144 insertions, 133 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 83293be48149..23702a9d4e6d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -67,33 +67,43 @@ struct dentry_stat_t dentry_stat = {
         .age_limit = 45,
 };
 
-static void __d_free(struct dentry *dentry)
+static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
+static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
+
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
+                   size_t *lenp, loff_t *ppos)
+{
+        dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
+        dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+        return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+#endif
+
+static void __d_free(struct rcu_head *head)
 {
+        struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
+
         WARN_ON(!list_empty(&dentry->d_alias));
         if (dname_external(dentry))
                 kfree(dentry->d_name.name);
         kmem_cache_free(dentry_cache, dentry);
 }
 
-static void d_callback(struct rcu_head *head)
-{
-        struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
-        __d_free(dentry);
-}
-
 /*
- * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry
- * inside dcache_lock.
+ * no dcache_lock, please.
  */
 static void d_free(struct dentry *dentry)
 {
+        percpu_counter_dec(&nr_dentry);
         if (dentry->d_op && dentry->d_op->d_release)
                 dentry->d_op->d_release(dentry);
+
         /* if dentry was never inserted into hash, immediate free is OK */
         if (hlist_unhashed(&dentry->d_hash))
-                __d_free(dentry);
+                __d_free(&dentry->d_u.d_rcu);
         else
-                call_rcu(&dentry->d_u.d_rcu, d_callback);
+                call_rcu(&dentry->d_u.d_rcu, __d_free);
 }
 
@@ -123,37 +133,34 @@ static void dentry_iput(struct dentry * dentry)
 }
 
 /*
- * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
+ * dentry_lru_(add|del|move_tail) must be called with dcache_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
-        list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-        dentry->d_sb->s_nr_dentry_unused++;
-        dentry_stat.nr_unused++;
-}
-
-static void dentry_lru_add_tail(struct dentry *dentry)
-{
-        list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-        dentry->d_sb->s_nr_dentry_unused++;
-        dentry_stat.nr_unused++;
+        if (list_empty(&dentry->d_lru)) {
+                list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+                dentry->d_sb->s_nr_dentry_unused++;
+                percpu_counter_inc(&nr_dentry_unused);
+        }
 }
 
 static void dentry_lru_del(struct dentry *dentry)
 {
         if (!list_empty(&dentry->d_lru)) {
-                list_del(&dentry->d_lru);
+                list_del_init(&dentry->d_lru);
                 dentry->d_sb->s_nr_dentry_unused--;
-                dentry_stat.nr_unused--;
+                percpu_counter_dec(&nr_dentry_unused);
         }
 }
 
-static void dentry_lru_del_init(struct dentry *dentry)
+static void dentry_lru_move_tail(struct dentry *dentry)
 {
-        if (likely(!list_empty(&dentry->d_lru))) {
-                list_del_init(&dentry->d_lru);
-                dentry->d_sb->s_nr_dentry_unused--;
-                dentry_stat.nr_unused--;
+        if (list_empty(&dentry->d_lru)) {
+                list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+                dentry->d_sb->s_nr_dentry_unused++;
+                percpu_counter_inc(&nr_dentry_unused);
+        } else {
+                list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
         }
 }
 
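The reworked helpers are deliberately idempotent: dentry_lru_add() only links the dentry if it is not already on the LRU, dentry_lru_del() only unlinks it if it is, and dentry_lru_move_tail() either adds or rotates as appropriate. That is what lets callers further down (dput(), select_parent(), the umount path) invoke them unconditionally. The guard is the standard list.h idiom of pairing list_del_init() with list_empty(); a generic sketch with hypothetical my_* names, not code from this patch:

#include <linux/list.h>

static LIST_HEAD(my_lru);                       /* the LRU itself */

struct my_obj {
        struct list_head lru;                   /* INIT_LIST_HEAD()ed at creation */
};

static void my_lru_add(struct my_obj *obj)
{
        if (list_empty(&obj->lru))              /* no-op if already on the LRU */
                list_add(&obj->lru, &my_lru);
}

static void my_lru_del(struct my_obj *obj)
{
        if (!list_empty(&obj->lru))
                list_del_init(&obj->lru);       /* leave it self-linked for the next check */
}

static void my_lru_move_tail(struct my_obj *obj)
{
        if (list_empty(&obj->lru))
                list_add_tail(&obj->lru, &my_lru);
        else
                list_move_tail(&obj->lru, &my_lru);
}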
@@ -172,7 +179,6 @@ static struct dentry *d_kill(struct dentry *dentry)
         struct dentry *parent;
 
         list_del(&dentry->d_u.d_child);
-        dentry_stat.nr_dentry--;        /* For d_free, below */
         /*drops the locks, at that point nobody can reach this dentry */
         dentry_iput(dentry);
         if (IS_ROOT(dentry))
@@ -237,13 +243,15 @@ repeat:
                 if (dentry->d_op->d_delete(dentry))
                         goto unhash_it;
         }
+
         /* Unreachable? Get rid of it */
         if (d_unhashed(dentry))
                 goto kill_it;
-        if (list_empty(&dentry->d_lru)) {
-                dentry->d_flags |= DCACHE_REFERENCED;
-                dentry_lru_add(dentry);
-        }
+
+        /* Otherwise leave it cached and ensure it's on the LRU */
+        dentry->d_flags |= DCACHE_REFERENCED;
+        dentry_lru_add(dentry);
+
         spin_unlock(&dentry->d_lock);
         spin_unlock(&dcache_lock);
         return;
@@ -318,11 +326,10 @@ int d_invalidate(struct dentry * dentry)
 EXPORT_SYMBOL(d_invalidate);
 
 /* This should be called _only_ with dcache_lock held */
-
 static inline struct dentry * __dget_locked(struct dentry *dentry)
 {
         atomic_inc(&dentry->d_count);
-        dentry_lru_del_init(dentry);
+        dentry_lru_del(dentry);
         return dentry;
 }
 
@@ -441,73 +448,27 @@ static void prune_one_dentry(struct dentry * dentry)
 
                 if (dentry->d_op && dentry->d_op->d_delete)
                         dentry->d_op->d_delete(dentry);
-                dentry_lru_del_init(dentry);
+                dentry_lru_del(dentry);
                 __d_drop(dentry);
                 dentry = d_kill(dentry);
                 spin_lock(&dcache_lock);
         }
 }
 
-/*
- * Shrink the dentry LRU on a given superblock.
- * @sb : superblock to shrink dentry LRU.
- * @count: If count is NULL, we prune all dentries on superblock.
- * @flags: If flags is non-zero, we need to do special processing based on
- *   which flags are set. This means we don't need to maintain multiple
- *   similar copies of this loop.
- */
-static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+static void shrink_dentry_list(struct list_head *list)
 {
-        LIST_HEAD(referenced);
-        LIST_HEAD(tmp);
         struct dentry *dentry;
-        int cnt = 0;
 
-        BUG_ON(!sb);
-        BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
-        spin_lock(&dcache_lock);
-        if (count != NULL)
-                /* called from prune_dcache() and shrink_dcache_parent() */
-                cnt = *count;
-restart:
-        if (count == NULL)
-                list_splice_init(&sb->s_dentry_lru, &tmp);
-        else {
-                while (!list_empty(&sb->s_dentry_lru)) {
-                        dentry = list_entry(sb->s_dentry_lru.prev,
-                                        struct dentry, d_lru);
-                        BUG_ON(dentry->d_sb != sb);
+        while (!list_empty(list)) {
+                dentry = list_entry(list->prev, struct dentry, d_lru);
+                dentry_lru_del(dentry);
 
-                        spin_lock(&dentry->d_lock);
-                        /*
-                         * If we are honouring the DCACHE_REFERENCED flag and
-                         * the dentry has this flag set, don't free it. Clear
-                         * the flag and put it back on the LRU.
-                         */
-                        if ((flags & DCACHE_REFERENCED)
-                                && (dentry->d_flags & DCACHE_REFERENCED)) {
-                                dentry->d_flags &= ~DCACHE_REFERENCED;
-                                list_move(&dentry->d_lru, &referenced);
-                                spin_unlock(&dentry->d_lock);
-                        } else {
-                                list_move_tail(&dentry->d_lru, &tmp);
-                                spin_unlock(&dentry->d_lock);
-                                cnt--;
-                                if (!cnt)
-                                        break;
-                        }
-                        cond_resched_lock(&dcache_lock);
-                }
-        }
-        while (!list_empty(&tmp)) {
-                dentry = list_entry(tmp.prev, struct dentry, d_lru);
-                dentry_lru_del_init(dentry);
-                spin_lock(&dentry->d_lock);
                 /*
                  * We found an inuse dentry which was not removed from
                  * the LRU because of laziness during lookup. Do not free
                  * it - just keep it off the LRU list.
                  */
+                spin_lock(&dentry->d_lock);
                 if (atomic_read(&dentry->d_count)) {
                         spin_unlock(&dentry->d_lock);
                         continue;
@@ -516,13 +477,60 @@ restart:
                 /* dentry->d_lock was dropped in prune_one_dentry() */
                 cond_resched_lock(&dcache_lock);
         }
-        if (count == NULL && !list_empty(&sb->s_dentry_lru))
-                goto restart;
-        if (count != NULL)
-                *count = cnt;
+}
+
+/**
+ * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
+ * @sb: superblock to shrink dentry LRU.
+ * @count: number of entries to prune
+ * @flags: flags to control the dentry processing
+ *
+ * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
+ */
+static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+{
+        /* called from prune_dcache() and shrink_dcache_parent() */
+        struct dentry *dentry;
+        LIST_HEAD(referenced);
+        LIST_HEAD(tmp);
+        int cnt = *count;
+
+        spin_lock(&dcache_lock);
+        while (!list_empty(&sb->s_dentry_lru)) {
+                dentry = list_entry(sb->s_dentry_lru.prev,
+                                struct dentry, d_lru);
+                BUG_ON(dentry->d_sb != sb);
+
+                /*
+                 * If we are honouring the DCACHE_REFERENCED flag and the
+                 * dentry has this flag set, don't free it. Clear the flag
+                 * and put it back on the LRU.
+                 */
+                if (flags & DCACHE_REFERENCED) {
+                        spin_lock(&dentry->d_lock);
+                        if (dentry->d_flags & DCACHE_REFERENCED) {
+                                dentry->d_flags &= ~DCACHE_REFERENCED;
+                                list_move(&dentry->d_lru, &referenced);
+                                spin_unlock(&dentry->d_lock);
+                                cond_resched_lock(&dcache_lock);
+                                continue;
+                        }
+                        spin_unlock(&dentry->d_lock);
+                }
+
+                list_move_tail(&dentry->d_lru, &tmp);
+                if (!--cnt)
+                        break;
+                cond_resched_lock(&dcache_lock);
+        }
+
+        *count = cnt;
+        shrink_dentry_list(&tmp);
+
         if (!list_empty(&referenced))
                 list_splice(&referenced, &sb->s_dentry_lru);
         spin_unlock(&dcache_lock);
+
 }
 
 /**
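shrink_dentry_list() gives the pruning paths one disposal loop to share: victims are first moved from the shared LRU onto a caller-private list while dcache_lock is held, and that private batch is then handed to the common helper. An outline of the pattern with stand-in names (shared_lru, shared_lock and dispose_list() are hypothetical, not symbols from this patch):

#include <linux/list.h>
#include <linux/spinlock.h>

/* stand-in for shrink_dentry_list(): unlink everything on @list */
static void dispose_list(struct list_head *list)
{
        while (!list_empty(list))
                list_del_init(list->next);      /* real code would also free the object */
}

static void shrink_some(struct list_head *shared_lru, spinlock_t *shared_lock,
                        int nr_to_free)
{
        LIST_HEAD(tmp);

        spin_lock(shared_lock);

        /* 1. move victims from the shared LRU onto a private list */
        while (nr_to_free-- && !list_empty(shared_lru))
                list_move_tail(shared_lru->prev, &tmp);

        /* 2. dispose of the whole batch through the common helper */
        dispose_list(&tmp);

        spin_unlock(shared_lock);
}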
@@ -538,7 +546,7 @@ static void prune_dcache(int count)
 {
         struct super_block *sb, *p = NULL;
         int w_count;
-        int unused = dentry_stat.nr_unused;
+        int unused = percpu_counter_sum_positive(&nr_dentry_unused);
         int prune_ratio;
         int pruned;
 
@@ -608,13 +616,19 @@ static void prune_dcache(int count)
  * shrink_dcache_sb - shrink dcache for a superblock
  * @sb: superblock
  *
- * Shrink the dcache for the specified super block. This
- * is used to free the dcache before unmounting a file
- * system
+ * Shrink the dcache for the specified super block. This is used to free
+ * the dcache before unmounting a file system.
  */
-void shrink_dcache_sb(struct super_block * sb)
+void shrink_dcache_sb(struct super_block *sb)
 {
-        __shrink_dcache_sb(sb, NULL, 0);
+        LIST_HEAD(tmp);
+
+        spin_lock(&dcache_lock);
+        while (!list_empty(&sb->s_dentry_lru)) {
+                list_splice_init(&sb->s_dentry_lru, &tmp);
+                shrink_dentry_list(&tmp);
+        }
+        spin_unlock(&dcache_lock);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
@@ -632,7 +646,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 
         /* detach this root from the system */
         spin_lock(&dcache_lock);
-        dentry_lru_del_init(dentry);
+        dentry_lru_del(dentry);
         __d_drop(dentry);
         spin_unlock(&dcache_lock);
 
@@ -646,7 +660,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                 spin_lock(&dcache_lock);
                 list_for_each_entry(loop, &dentry->d_subdirs,
                                     d_u.d_child) {
-                        dentry_lru_del_init(loop);
+                        dentry_lru_del(loop);
                         __d_drop(loop);
                         cond_resched_lock(&dcache_lock);
                 }
@@ -703,20 +717,13 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                          * otherwise we ascend to the parent and move to the
                          * next sibling if there is one */
                         if (!parent)
-                                goto out;
-
+                                return;
                         dentry = parent;
-
                 } while (list_empty(&dentry->d_subdirs));
 
                 dentry = list_entry(dentry->d_subdirs.next,
                                     struct dentry, d_u.d_child);
         }
-out:
-        /* several dentries were freed, need to correct nr_dentry */
-        spin_lock(&dcache_lock);
-        dentry_stat.nr_dentry -= detached;
-        spin_unlock(&dcache_lock);
 }
 
 /*
@@ -830,14 +837,15 @@ resume:
                 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
                 next = tmp->next;
 
-                dentry_lru_del_init(dentry);
                 /*
                  * move only zero ref count dentries to the end
                  * of the unused list for prune_dcache
                  */
                 if (!atomic_read(&dentry->d_count)) {
-                        dentry_lru_add_tail(dentry);
+                        dentry_lru_move_tail(dentry);
                         found++;
+                } else {
+                        dentry_lru_del(dentry);
                 }
 
                 /*
@@ -900,12 +908,16 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  */
 static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
+        int nr_unused;
+
         if (nr) {
                 if (!(gfp_mask & __GFP_FS))
                         return -1;
                 prune_dcache(nr);
         }
-        return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+
+        nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+        return (nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker dcache_shrinker = {
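shrink_dcache_memory() uses the shrinker convention of this kernel generation: called with nr == 0 it only reports how many objects are freeable, scaled by sysctl_vfs_cache_pressure (for example, 30,000 unused dentries at the default pressure of 100 report as 30,000 / 100 * 100 = 30,000); called with nr > 0 it frees up to that many. A rough sketch of a shrinker wired up the same way, with hypothetical my_* helpers standing in for the dcache-specific pieces:

#include <linux/mm.h>
#include <linux/dcache.h>               /* sysctl_vfs_cache_pressure */

/* stand-ins: a real shrinker would consult and prune its own cache here */
static int my_nr_unused;
static void my_prune(int nr) { }

static int my_shrink(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
        if (nr) {
                /* refuse to recurse into filesystems from fs allocations */
                if (!(gfp_mask & __GFP_FS))
                        return -1;
                my_prune(nr);
        }
        /* report freeable objects, scaled by vfs_cache_pressure */
        return (my_nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker my_shrinker = {
        .shrink = my_shrink,
        .seeks  = DEFAULT_SEEKS,
};

/* registered once at init time: register_shrinker(&my_shrinker); */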
@@ -972,9 +984,10 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
         spin_lock(&dcache_lock);
         if (parent)
                 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
-        dentry_stat.nr_dentry++;
         spin_unlock(&dcache_lock);
 
+        percpu_counter_inc(&nr_dentry);
+
         return dentry;
 }
 EXPORT_SYMBOL(d_alloc);
@@ -1478,33 +1491,26 @@ out:
  * This is used by ncpfs in its readdir implementation.
  * Zero is returned in the dentry is invalid.
  */
-
-int d_validate(struct dentry *dentry, struct dentry *dparent)
+int d_validate(struct dentry *dentry, struct dentry *parent)
 {
-        struct hlist_head *base;
-        struct hlist_node *lhp;
+        struct hlist_head *head = d_hash(parent, dentry->d_name.hash);
+        struct hlist_node *node;
+        struct dentry *d;
 
         /* Check whether the ptr might be valid at all.. */
         if (!kmem_ptr_validate(dentry_cache, dentry))
-                goto out;
-
-        if (dentry->d_parent != dparent)
-                goto out;
+                return 0;
+        if (dentry->d_parent != parent)
+                return 0;
 
-        spin_lock(&dcache_lock);
-        base = d_hash(dparent, dentry->d_name.hash);
-        hlist_for_each(lhp,base) {
-                /* hlist_for_each_entry_rcu() not required for d_hash list
-                 * as it is parsed under dcache_lock
-                 */
-                if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
-                        __dget_locked(dentry);
-                        spin_unlock(&dcache_lock);
+        rcu_read_lock();
+        hlist_for_each_entry_rcu(d, node, head, d_hash) {
+                if (d == dentry) {
+                        dget(dentry);
                         return 1;
                 }
         }
-        spin_unlock(&dcache_lock);
-out:
+        rcu_read_unlock();
         return 0;
 }
 EXPORT_SYMBOL(d_validate);
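The new d_validate() walks the hash chain under rcu_read_lock() instead of dcache_lock, using the four-argument hlist_for_each_entry_rcu() of this kernel generation (it still takes an explicit struct hlist_node cursor). The same lookup shape in isolation, with hypothetical types rather than dentries:

#include <linux/types.h>
#include <linux/rculist.h>

struct my_node {
        struct hlist_node hash;
        int key;
};

/* returns true if a node with the given key is currently hashed on @head */
static bool my_lookup(struct hlist_head *head, int key)
{
        struct hlist_node *pos;
        struct my_node *n;
        bool found = false;

        rcu_read_lock();
        hlist_for_each_entry_rcu(n, pos, head, hash) {
                if (n->key == key) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}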
@@ -1994,7 +2000,7 @@ global_root:
  * Returns a pointer into the buffer or an error code if the
  * path was too long.
  *
- * "buflen" should be positive. Caller holds the dcache_lock.
+ * "buflen" should be positive.
  *
  * If path is not reachable from the supplied root, then the value of
  * root is changed (without modifying refcounts).
@@ -2006,10 +2012,12 @@ char *__d_path(const struct path *path, struct path *root,
         int error;
 
         prepend(&res, &buflen, "\0", 1);
+        spin_lock(&dcache_lock);
         error = prepend_path(path, root, &res, &buflen);
+        spin_unlock(&dcache_lock);
+
         if (error)
                 return ERR_PTR(error);
-
         return res;
 }
 
@@ -2419,6 +2427,9 @@ static void __init dcache_init(void)
 {
         int loop;
 
+        percpu_counter_init(&nr_dentry, 0);
+        percpu_counter_init(&nr_dentry_unused, 0);
+
         /*
          * A constructor could be added for stable state like the lists,
          * but it is probably not worth it because of the cache nature