author		Ingo Molnar <mingo@elte.hu>	2008-08-14 06:19:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-14 06:19:59 -0400
commit		8d7ccaa545490cdffdfaff0842436a8dd85cf47b (patch)
tree		8129b5907161bc6ae26deb3645ce1e280c5e1f51 /fs/dcache.c
parent		b2139aa0eec330c711c5a279db361e5ef1178e78 (diff)
parent		30a2f3c60a84092c8084dfe788b710f8d0768cd4 (diff)

Merge commit 'v2.6.27-rc3' into x86/prototypes

Conflicts:
	include/asm-x86/dma-mapping.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>

Diffstat (limited to 'fs/dcache.c')

 fs/dcache.c | 438 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 283 insertions(+), 155 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 6068c25b393c..101663d15e9f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -61,7 +61,6 @@ static struct kmem_cache *dentry_cache __read_mostly;
 static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
 static struct hlist_head *dentry_hashtable __read_mostly;
-static LIST_HEAD(dentry_unused);
 
 /* Statistics gathering. */
 struct dentry_stat_t dentry_stat = {
@@ -96,14 +95,6 @@ static void d_free(struct dentry *dentry)
 	call_rcu(&dentry->d_u.d_rcu, d_callback);
 }
 
-static void dentry_lru_remove(struct dentry *dentry)
-{
-	if (!list_empty(&dentry->d_lru)) {
-		list_del_init(&dentry->d_lru);
-		dentry_stat.nr_unused--;
-	}
-}
-
 /*
  * Release the dentry's inode, using the filesystem
  * d_iput() operation if defined.
@@ -130,6 +121,41 @@ static void dentry_iput(struct dentry * dentry)
 	}
 }
 
+/*
+ * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
+ */
+static void dentry_lru_add(struct dentry *dentry)
+{
+	list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+	dentry->d_sb->s_nr_dentry_unused++;
+	dentry_stat.nr_unused++;
+}
+
+static void dentry_lru_add_tail(struct dentry *dentry)
+{
+	list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+	dentry->d_sb->s_nr_dentry_unused++;
+	dentry_stat.nr_unused++;
+}
+
+static void dentry_lru_del(struct dentry *dentry)
+{
+	if (!list_empty(&dentry->d_lru)) {
+		list_del(&dentry->d_lru);
+		dentry->d_sb->s_nr_dentry_unused--;
+		dentry_stat.nr_unused--;
+	}
+}
+
+static void dentry_lru_del_init(struct dentry *dentry)
+{
+	if (likely(!list_empty(&dentry->d_lru))) {
+		list_del_init(&dentry->d_lru);
+		dentry->d_sb->s_nr_dentry_unused--;
+		dentry_stat.nr_unused--;
+	}
+}
+
 /**
  * d_kill - kill dentry and return parent
  * @dentry: dentry to kill
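
The comment above states the locking rule for the new helpers: dcache_lock serializes both the per-superblock LRU list and the two unused counters. As a minimal sketch of the expected calling pattern (the function name and surrounding logic here are illustrative only, not part of this patch):

	/* Hypothetical sketch: retire an unreferenced dentry onto its
	 * superblock's LRU, taking the locks in the order the patch uses. */
	static void example_retire_dentry(struct dentry *dentry)
	{
		spin_lock(&dcache_lock);	/* protects the LRU and counters */
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count) && list_empty(&dentry->d_lru))
			dentry_lru_add(dentry);	/* bumps s_nr_dentry_unused and
						 * dentry_stat.nr_unused together */
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}

This mirrors what dput() does in the next hunk via dentry_lru_add().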
@@ -212,8 +238,7 @@ repeat:
 		goto kill_it;
 	if (list_empty(&dentry->d_lru)) {
 		dentry->d_flags |= DCACHE_REFERENCED;
-		list_add(&dentry->d_lru, &dentry_unused);
-		dentry_stat.nr_unused++;
+		dentry_lru_add(dentry);
 	}
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&dcache_lock);
@@ -222,7 +247,8 @@ repeat:
 unhash_it:
 	__d_drop(dentry);
 kill_it:
-	dentry_lru_remove(dentry);
+	/* if dentry was on the d_lru list delete it from there */
+	dentry_lru_del(dentry);
 	dentry = d_kill(dentry);
 	if (dentry)
 		goto repeat;
@@ -290,7 +316,7 @@ int d_invalidate(struct dentry * dentry)
 static inline struct dentry * __dget_locked(struct dentry *dentry)
 {
 	atomic_inc(&dentry->d_count);
-	dentry_lru_remove(dentry);
+	dentry_lru_del_init(dentry);
 	return dentry;
 }
 
@@ -406,133 +432,168 @@ static void prune_one_dentry(struct dentry * dentry)
 
 	if (dentry->d_op && dentry->d_op->d_delete)
 		dentry->d_op->d_delete(dentry);
-	dentry_lru_remove(dentry);
+	dentry_lru_del_init(dentry);
 	__d_drop(dentry);
 	dentry = d_kill(dentry);
 	spin_lock(&dcache_lock);
 	}
 }
 
-/**
- * prune_dcache - shrink the dcache
- * @count: number of entries to try and free
- * @sb: if given, ignore dentries for other superblocks
- *      which are being unmounted.
- *
- * Shrink the dcache. This is done when we need
- * more memory, or simply when we need to unmount
- * something (at which point we need to unuse
- * all dentries).
- *
- * This function may fail to free any resources if
- * all the dentries are in use.
- */
-
-static void prune_dcache(int count, struct super_block *sb)
+/*
+ * Shrink the dentry LRU on a given superblock.
+ * @sb   : superblock to shrink dentry LRU.
+ * @count: If count is NULL, we prune all dentries on superblock.
+ * @flags: If flags is non-zero, we need to do special processing based on
+ * which flags are set. This means we don't need to maintain multiple
+ * similar copies of this loop.
+ */
+static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
 {
-	spin_lock(&dcache_lock);
-	for (; count ; count--) {
-		struct dentry *dentry;
-		struct list_head *tmp;
-		struct rw_semaphore *s_umount;
-
-		cond_resched_lock(&dcache_lock);
+	LIST_HEAD(referenced);
+	LIST_HEAD(tmp);
+	struct dentry *dentry;
+	int cnt = 0;
 
-		tmp = dentry_unused.prev;
-		if (sb) {
-			/* Try to find a dentry for this sb, but don't try
-			 * too hard, if they aren't near the tail they will
-			 * be moved down again soon
+	BUG_ON(!sb);
+	BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
+	spin_lock(&dcache_lock);
+	if (count != NULL)
+		/* called from prune_dcache() and shrink_dcache_parent() */
+		cnt = *count;
+restart:
+	if (count == NULL)
+		list_splice_init(&sb->s_dentry_lru, &tmp);
+	else {
+		while (!list_empty(&sb->s_dentry_lru)) {
+			dentry = list_entry(sb->s_dentry_lru.prev,
+					struct dentry, d_lru);
+			BUG_ON(dentry->d_sb != sb);
+
+			spin_lock(&dentry->d_lock);
+			/*
+			 * If we are honouring the DCACHE_REFERENCED flag and
+			 * the dentry has this flag set, don't free it. Clear
+			 * the flag and put it back on the LRU.
 			 */
-			int skip = count;
-			while (skip && tmp != &dentry_unused &&
-			    list_entry(tmp, struct dentry, d_lru)->d_sb != sb) {
-				skip--;
-				tmp = tmp->prev;
+			if ((flags & DCACHE_REFERENCED)
+				&& (dentry->d_flags & DCACHE_REFERENCED)) {
+				dentry->d_flags &= ~DCACHE_REFERENCED;
+				list_move_tail(&dentry->d_lru, &referenced);
+				spin_unlock(&dentry->d_lock);
+			} else {
+				list_move_tail(&dentry->d_lru, &tmp);
+				spin_unlock(&dentry->d_lock);
+				cnt--;
+				if (!cnt)
+					break;
 			}
+			cond_resched_lock(&dcache_lock);
 		}
-		if (tmp == &dentry_unused)
-			break;
-		list_del_init(tmp);
-		prefetch(dentry_unused.prev);
-		dentry_stat.nr_unused--;
-		dentry = list_entry(tmp, struct dentry, d_lru);
-
-		spin_lock(&dentry->d_lock);
+	}
+	while (!list_empty(&tmp)) {
+		dentry = list_entry(tmp.prev, struct dentry, d_lru);
+		dentry_lru_del_init(dentry);
+		spin_lock(&dentry->d_lock);
 		/*
 		 * We found an inuse dentry which was not removed from
-		 * dentry_unused because of laziness during lookup. Do not free
-		 * it - just keep it off the dentry_unused list.
+		 * the LRU because of laziness during lookup. Do not free
+		 * it - just keep it off the LRU list.
 		 */
 		if (atomic_read(&dentry->d_count)) {
 			spin_unlock(&dentry->d_lock);
 			continue;
 		}
-		/* If the dentry was recently referenced, don't free it. */
-		if (dentry->d_flags & DCACHE_REFERENCED) {
-			dentry->d_flags &= ~DCACHE_REFERENCED;
-			list_add(&dentry->d_lru, &dentry_unused);
-			dentry_stat.nr_unused++;
-			spin_unlock(&dentry->d_lock);
+		prune_one_dentry(dentry);
+		/* dentry->d_lock was dropped in prune_one_dentry() */
+		cond_resched_lock(&dcache_lock);
+	}
+	if (count == NULL && !list_empty(&sb->s_dentry_lru))
+		goto restart;
+	if (count != NULL)
+		*count = cnt;
+	if (!list_empty(&referenced))
+		list_splice(&referenced, &sb->s_dentry_lru);
+	spin_unlock(&dcache_lock);
+}
+
+/**
+ * prune_dcache - shrink the dcache
+ * @count: number of entries to try to free
+ *
+ * Shrink the dcache. This is done when we need more memory, or simply when we
+ * need to unmount something (at which point we need to unuse all dentries).
+ *
+ * This function may fail to free any resources if all the dentries are in use.
+ */
+static void prune_dcache(int count)
+{
+	struct super_block *sb;
+	int w_count;
+	int unused = dentry_stat.nr_unused;
+	int prune_ratio;
+	int pruned;
+
+	if (unused == 0 || count == 0)
+		return;
+	spin_lock(&dcache_lock);
+restart:
+	if (count >= unused)
+		prune_ratio = 1;
+	else
+		prune_ratio = unused / count;
+	spin_lock(&sb_lock);
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (sb->s_nr_dentry_unused == 0)
 			continue;
-		}
-		/*
-		 * If the dentry is not DCACHED_REFERENCED, it is time
-		 * to remove it from the dcache, provided the super block is
-		 * NULL (which means we are trying to reclaim memory)
-		 * or this dentry belongs to the same super block that
-		 * we want to shrink.
+		sb->s_count++;
+		/* Now, we reclaim unused dentries with fairness.
+		 * We reclaim the same percentage from each superblock.
+		 * We calculate number of dentries to scan on this sb
+		 * as follows, but the implementation is arranged to avoid
+		 * overflows:
+		 * number of dentries to scan on this sb =
+		 * count * (number of dentries on this sb /
+		 * number of dentries in the machine)
 		 */
+		spin_unlock(&sb_lock);
+		if (prune_ratio != 1)
+			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
+		else
+			w_count = sb->s_nr_dentry_unused;
+		pruned = w_count;
 		/*
-		 * If this dentry is for "my" filesystem, then I can prune it
-		 * without taking the s_umount lock (I already hold it).
+		 * We need to be sure this filesystem isn't being unmounted,
+		 * otherwise we could race with generic_shutdown_super(), and
+		 * end up holding a reference to an inode while the filesystem
+		 * is unmounted.  So we try to get s_umount, and make sure
+		 * s_root isn't NULL.
 		 */
-		if (sb && dentry->d_sb == sb) {
-			prune_one_dentry(dentry);
-			continue;
-		}
-		/*
-		 * ...otherwise we need to be sure this filesystem isn't being
-		 * unmounted, otherwise we could race with
-		 * generic_shutdown_super(), and end up holding a reference to
-		 * an inode while the filesystem is unmounted.
-		 * So we try to get s_umount, and make sure s_root isn't NULL.
-		 * (Take a local copy of s_umount to avoid a use-after-free of
-		 * `dentry').
-		 */
-		s_umount = &dentry->d_sb->s_umount;
-		if (down_read_trylock(s_umount)) {
-			if (dentry->d_sb->s_root != NULL) {
-				prune_one_dentry(dentry);
-				up_read(s_umount);
-				continue;
+		if (down_read_trylock(&sb->s_umount)) {
+			if ((sb->s_root != NULL) &&
+			    (!list_empty(&sb->s_dentry_lru))) {
+				spin_unlock(&dcache_lock);
+				__shrink_dcache_sb(sb, &w_count,
+						DCACHE_REFERENCED);
+				pruned -= w_count;
+				spin_lock(&dcache_lock);
 			}
-			up_read(s_umount);
-		}
-		spin_unlock(&dentry->d_lock);
+			up_read(&sb->s_umount);
+		}
+		spin_lock(&sb_lock);
+		count -= pruned;
 		/*
-		 * Insert dentry at the head of the list as inserting at the
-		 * tail leads to a cycle.
+		 * restart only when sb is no longer on the list and
+		 * we have more work to do.
 		 */
-		list_add(&dentry->d_lru, &dentry_unused);
-		dentry_stat.nr_unused++;
+		if (__put_super_and_need_restart(sb) && count > 0) {
+			spin_unlock(&sb_lock);
+			goto restart;
+		}
 	}
+	spin_unlock(&sb_lock);
 	spin_unlock(&dcache_lock);
 }
 
-/*
- * Shrink the dcache for the specified super block.
- * This allows us to unmount a device without disturbing
- * the dcache for the other devices.
- *
- * This implementation makes just two traversals of the
- * unused list.  On the first pass we move the selected
- * dentries to the most recent end, and on the second
- * pass we free them.  The second pass must restart after
- * each dput(), but since the target dentries are all at
- * the end, it's really just a single traversal.
- */
-
 /**
  * shrink_dcache_sb - shrink dcache for a superblock
  * @sb: superblock
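
To make the fairness comment in prune_dcache() concrete, here is a worked example of the w_count arithmetic (the numbers are invented for illustration):

	unused = 1024 system-wide, count = 128 to prune
		=> prune_ratio = unused / count = 1024 / 128 = 8

	sb A: s_nr_dentry_unused = 200  =>  w_count = 200 / 8 + 1 = 26
	sb B: s_nr_dentry_unused =  40  =>  w_count =  40 / 8 + 1 =  6

26 is roughly 200 * (128 / 1024) = 25: each superblock is asked to give up about the same fraction of its unused dentries, and the "+ 1" guarantees forward progress even when the integer division rounds down to zero.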
@@ -541,44 +602,9 @@ static void prune_dcache(int count, struct super_block *sb)
  * is used to free the dcache before unmounting a file
  * system
  */
-
 void shrink_dcache_sb(struct super_block * sb)
 {
-	struct list_head *tmp, *next;
-	struct dentry *dentry;
-
-	/*
-	 * Pass one ... move the dentries for the specified
-	 * superblock to the most recent end of the unused list.
-	 */
-	spin_lock(&dcache_lock);
-	list_for_each_prev_safe(tmp, next, &dentry_unused) {
-		dentry = list_entry(tmp, struct dentry, d_lru);
-		if (dentry->d_sb != sb)
-			continue;
-		list_move_tail(tmp, &dentry_unused);
-	}
-
-	/*
-	 * Pass two ... free the dentries for this superblock.
-	 */
-repeat:
-	list_for_each_prev_safe(tmp, next, &dentry_unused) {
-		dentry = list_entry(tmp, struct dentry, d_lru);
-		if (dentry->d_sb != sb)
-			continue;
-		dentry_stat.nr_unused--;
-		list_del_init(tmp);
-		spin_lock(&dentry->d_lock);
-		if (atomic_read(&dentry->d_count)) {
-			spin_unlock(&dentry->d_lock);
-			continue;
-		}
-		prune_one_dentry(dentry);
-		cond_resched_lock(&dcache_lock);
-		goto repeat;
-	}
-	spin_unlock(&dcache_lock);
+	__shrink_dcache_sb(sb, NULL, 0);
 }
 
 /*
@@ -595,7 +621,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 
 	/* detach this root from the system */
 	spin_lock(&dcache_lock);
-	dentry_lru_remove(dentry);
+	dentry_lru_del_init(dentry);
 	__d_drop(dentry);
 	spin_unlock(&dcache_lock);
 
@@ -609,7 +635,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 			spin_lock(&dcache_lock);
 			list_for_each_entry(loop, &dentry->d_subdirs,
 					    d_u.d_child) {
-				dentry_lru_remove(loop);
+				dentry_lru_del_init(loop);
 				__d_drop(loop);
 				cond_resched_lock(&dcache_lock);
 			}
@@ -791,14 +817,13 @@ resume:
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
 
-		dentry_lru_remove(dentry);
+		dentry_lru_del_init(dentry);
 		/*
 		 * move only zero ref count dentries to the end
 		 * of the unused list for prune_dcache
 		 */
 		if (!atomic_read(&dentry->d_count)) {
-			list_add_tail(&dentry->d_lru, &dentry_unused);
-			dentry_stat.nr_unused++;
+			dentry_lru_add_tail(dentry);
 			found++;
 		}
 
@@ -840,10 +865,11 @@ out:
 
 void shrink_dcache_parent(struct dentry * parent)
 {
+	struct super_block *sb = parent->d_sb;
 	int found;
 
 	while ((found = select_parent(parent)) != 0)
-		prune_dcache(found, parent->d_sb);
+		__shrink_dcache_sb(sb, &found, 0);
 }
 
 /*
@@ -863,7 +889,7 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
 	if (nr) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
-		prune_dcache(nr, NULL);
+		prune_dcache(nr);
 	}
 	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
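
The shrinker's return value reports how many unused dentries the VFS is willing to give up, scaled by /proc/sys/vm/vfs_cache_pressure. A quick worked example of that formula (values invented for illustration):

	dentry_stat.nr_unused = 10000, vfs_cache_pressure = 100 (default)
		=> (10000 / 100) * 100 = 10000 reclaimable objects reported

	vfs_cache_pressure =  50  =>  5000 reported (dcache shrunk less eagerly)
	vfs_cache_pressure = 200  => 20000 reported (shrunk more aggressively)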
@@ -1194,6 +1220,107 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
 	return new;
 }
 
+/**
+ * d_add_ci - lookup or allocate new dentry with case-exact name
+ * @inode:  the inode case-insensitive lookup has found
+ * @dentry: the negative dentry that was passed to the parent's lookup func
+ * @name:   the case-exact name to be associated with the returned dentry
+ *
+ * This is to avoid filling the dcache with case-insensitive names to the
+ * same inode, only the actual correct case is stored in the dcache for
+ * case-insensitive filesystems.
+ *
+ * For a case-insensitive lookup match and if the case-exact dentry
+ * already exists in the dcache, use it and return it.
+ *
+ * If no entry exists with the exact case name, allocate new dentry with
+ * the exact case, and return the spliced entry.
+ */
+struct dentry *d_add_ci(struct inode *inode, struct dentry *dentry,
+			struct qstr *name)
+{
+	int error;
+	struct dentry *found;
+	struct dentry *new;
+
+	/* Does a dentry matching the name exist already? */
+	found = d_hash_and_lookup(dentry->d_parent, name);
+	/* If not, create it now and return */
+	if (!found) {
+		new = d_alloc(dentry->d_parent, name);
+		if (!new) {
+			error = -ENOMEM;
+			goto err_out;
+		}
+		found = d_splice_alias(inode, new);
+		if (found) {
+			dput(new);
+			return found;
+		}
+		return new;
+	}
+	/* Matching dentry exists, check if it is negative. */
+	if (found->d_inode) {
+		if (unlikely(found->d_inode != inode)) {
+			/* This can't happen because bad inodes are unhashed. */
+			BUG_ON(!is_bad_inode(inode));
+			BUG_ON(!is_bad_inode(found->d_inode));
+		}
+		/*
+		 * Already have the inode and the dentry attached, decrement
+		 * the reference count to balance the iget() done
+		 * earlier on. We found the dentry using d_lookup() so it
+		 * cannot be disconnected and thus we do not need to worry
+		 * about any NFS/disconnectedness issues here.
+		 */
+		iput(inode);
+		return found;
+	}
+	/*
+	 * Negative dentry: instantiate it unless the inode is a directory and
+	 * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED),
+	 * in which case d_move() that in place of the found dentry.
+	 */
+	if (!S_ISDIR(inode->i_mode)) {
+		/* Not a directory; everything is easy. */
+		d_instantiate(found, inode);
+		return found;
+	}
+	spin_lock(&dcache_lock);
+	if (list_empty(&inode->i_dentry)) {
+		/*
+		 * Directory without a 'disconnected' dentry; we need to do
+		 * d_instantiate() by hand because it takes dcache_lock which
+		 * we already hold.
+		 */
+		list_add(&found->d_alias, &inode->i_dentry);
+		found->d_inode = inode;
+		spin_unlock(&dcache_lock);
+		security_d_instantiate(found, inode);
+		return found;
+	}
+	/*
+	 * Directory with a 'disconnected' dentry; get a reference to the
+	 * 'disconnected' dentry.
+	 */
+	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
+	dget_locked(new);
+	spin_unlock(&dcache_lock);
+	/* Do security voodoo. */
+	security_d_instantiate(found, inode);
+	/* Move new in place of found. */
+	d_move(new, found);
+	/* Balance the iget() we did above. */
+	iput(inode);
+	/* Throw away found. */
+	dput(found);
+	/* Use new as the actual dentry. */
+	return new;
+
+err_out:
+	iput(inode);
+	return ERR_PTR(error);
+}
 
 /**
  * d_lookup - search for a dentry
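
As a sketch of the intended caller: a case-insensitive filesystem's ->lookup() finds the inode by folding case, recovers the on-disk spelling of the name, and hands both to d_add_ci(). Everything below other than d_add_ci(), d_add() and the ->lookup() prototype is hypothetical; xyz_find_entry_ci() is an invented helper, not a real kernel function:

	static struct dentry *xyz_lookup(struct inode *dir, struct dentry *dentry,
					 struct nameidata *nd)
	{
		struct qstr exact;	/* receives the case-exact, on-disk name */
		struct inode *inode;

		/* invented helper: case-insensitive search of 'dir' that takes
		 * a reference on the inode and fills 'exact' on success */
		inode = xyz_find_entry_ci(dir, &dentry->d_name, &exact);
		if (!inode) {
			d_add(dentry, NULL);	/* hash a negative dentry */
			return NULL;
		}
		/* d_add_ci() consumes the inode reference; it reuses a dentry
		 * that already has the exact-case name or splices a new one */
		return d_add_ci(inode, dentry, &exact);
	}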
@@ -1215,7 +1342,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
  * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
  * lookup is going on.
  *
- * dentry_unused list is not updated even if lookup finds the required dentry
+ * The dentry unused LRU is not updated even if lookup finds the required dentry
  * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
  * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
  * acquisition.
@@ -2228,6 +2355,7 @@ EXPORT_SYMBOL(d_path);
 EXPORT_SYMBOL(d_prune_aliases);
 EXPORT_SYMBOL(d_rehash);
 EXPORT_SYMBOL(d_splice_alias);
+EXPORT_SYMBOL(d_add_ci);
 EXPORT_SYMBOL(d_validate);
 EXPORT_SYMBOL(dget_locked);
 EXPORT_SYMBOL(dput);