Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  433
1 file changed, 190 insertions(+), 243 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 40707d88a945..be2bea834bf4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -246,16 +246,8 @@ static void __d_free(struct rcu_head *head)
 	kmem_cache_free(dentry_cache, dentry);
 }
 
-/*
- * no locks, please.
- */
-static void d_free(struct dentry *dentry)
+static void dentry_free(struct dentry *dentry)
 {
-	BUG_ON((int)dentry->d_lockref.count > 0);
-	this_cpu_dec(nr_dentry);
-	if (dentry->d_op && dentry->d_op->d_release)
-		dentry->d_op->d_release(dentry);
-
 	/* if dentry was never visible to RCU, immediate free is OK */
 	if (!(dentry->d_flags & DCACHE_RCUACCESS))
 		__d_free(&dentry->d_u.d_rcu);
@@ -403,56 +395,6 @@ static void dentry_lru_add(struct dentry *dentry)
 		d_lru_add(dentry);
 }
 
-/*
- * Remove a dentry with references from the LRU.
- *
- * If we are on the shrink list, then we can get to try_prune_one_dentry() and
- * lose our last reference through the parent walk. In this case, we need to
- * remove ourselves from the shrink list, not the LRU.
- */
-static void dentry_lru_del(struct dentry *dentry)
-{
-	if (dentry->d_flags & DCACHE_LRU_LIST) {
-		if (dentry->d_flags & DCACHE_SHRINK_LIST)
-			return d_shrink_del(dentry);
-		d_lru_del(dentry);
-	}
-}
-
-/**
- * d_kill - kill dentry and return parent
- * @dentry: dentry to kill
- * @parent: parent dentry
- *
- * The dentry must already be unhashed and removed from the LRU.
- *
- * If this is the root of the dentry tree, return NULL.
- *
- * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
- * d_kill.
- */
-static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
-	__releases(dentry->d_lock)
-	__releases(parent->d_lock)
-	__releases(dentry->d_inode->i_lock)
-{
-	list_del(&dentry->d_u.d_child);
-	/*
-	 * Inform d_walk() that we are no longer attached to the
-	 * dentry tree
-	 */
-	dentry->d_flags |= DCACHE_DENTRY_KILLED;
-	if (parent)
-		spin_unlock(&parent->d_lock);
-	dentry_iput(dentry);
-	/*
-	 * dentry_iput drops the locks, at which point nobody (except
-	 * transient RCU lookups) can reach this dentry.
-	 */
-	d_free(dentry);
-	return parent;
-}
-
 /**
  * d_drop - drop a dentry
  * @dentry: dentry to drop
@@ -499,37 +441,12 @@ void d_drop(struct dentry *dentry)
 }
 EXPORT_SYMBOL(d_drop);
 
-/*
- * Finish off a dentry we've decided to kill.
- * dentry->d_lock must be held, returns with it unlocked.
- * If ref is non-zero, then decrement the refcount too.
- * Returns dentry requiring refcount drop, or NULL if we're done.
- */
-static struct dentry *
-dentry_kill(struct dentry *dentry, int unlock_on_failure)
-	__releases(dentry->d_lock)
+static void __dentry_kill(struct dentry *dentry)
 {
-	struct inode *inode;
-	struct dentry *parent;
-
-	inode = dentry->d_inode;
-	if (inode && !spin_trylock(&inode->i_lock)) {
-relock:
-		if (unlock_on_failure) {
-			spin_unlock(&dentry->d_lock);
-			cpu_relax();
-		}
-		return dentry; /* try again with same dentry */
-	}
-	if (IS_ROOT(dentry))
-		parent = NULL;
-	else
+	struct dentry *parent = NULL;
+	bool can_free = true;
+	if (!IS_ROOT(dentry))
 		parent = dentry->d_parent;
-	if (parent && !spin_trylock(&parent->d_lock)) {
-		if (inode)
-			spin_unlock(&inode->i_lock);
-		goto relock;
-	}
 
 	/*
 	 * The dentry is now unrecoverably dead to the world.
@@ -543,10 +460,103 @@ relock:
 	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
 		dentry->d_op->d_prune(dentry);
 
-	dentry_lru_del(dentry);
+	if (dentry->d_flags & DCACHE_LRU_LIST) {
+		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
+			d_lru_del(dentry);
+	}
 	/* if it was on the hash then remove it */
 	__d_drop(dentry);
-	return d_kill(dentry, parent);
+	list_del(&dentry->d_u.d_child);
+	/*
+	 * Inform d_walk() that we are no longer attached to the
+	 * dentry tree
+	 */
+	dentry->d_flags |= DCACHE_DENTRY_KILLED;
+	if (parent)
+		spin_unlock(&parent->d_lock);
+	dentry_iput(dentry);
+	/*
+	 * dentry_iput drops the locks, at which point nobody (except
+	 * transient RCU lookups) can reach this dentry.
+	 */
+	BUG_ON((int)dentry->d_lockref.count > 0);
+	this_cpu_dec(nr_dentry);
+	if (dentry->d_op && dentry->d_op->d_release)
+		dentry->d_op->d_release(dentry);
+
+	spin_lock(&dentry->d_lock);
+	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+		dentry->d_flags |= DCACHE_MAY_FREE;
+		can_free = false;
+	}
+	spin_unlock(&dentry->d_lock);
+	if (likely(can_free))
+		dentry_free(dentry);
+}
+
+/*
+ * Finish off a dentry we've decided to kill.
+ * dentry->d_lock must be held, returns with it unlocked.
+ * If ref is non-zero, then decrement the refcount too.
+ * Returns dentry requiring refcount drop, or NULL if we're done.
+ */
+static struct dentry *dentry_kill(struct dentry *dentry)
+	__releases(dentry->d_lock)
+{
+	struct inode *inode = dentry->d_inode;
+	struct dentry *parent = NULL;
+
+	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
+		goto failed;
+
+	if (!IS_ROOT(dentry)) {
+		parent = dentry->d_parent;
+		if (unlikely(!spin_trylock(&parent->d_lock))) {
+			if (inode)
+				spin_unlock(&inode->i_lock);
+			goto failed;
+		}
+	}
+
+	__dentry_kill(dentry);
+	return parent;
+
+failed:
+	spin_unlock(&dentry->d_lock);
+	cpu_relax();
+	return dentry;	/* try again with same dentry */
+}
+
+static inline struct dentry *lock_parent(struct dentry *dentry)
+{
+	struct dentry *parent = dentry->d_parent;
+	if (IS_ROOT(dentry))
+		return NULL;
+	if (likely(spin_trylock(&parent->d_lock)))
+		return parent;
+	spin_unlock(&dentry->d_lock);
+	rcu_read_lock();
+again:
+	parent = ACCESS_ONCE(dentry->d_parent);
+	spin_lock(&parent->d_lock);
+	/*
+	 * We can't blindly lock dentry until we are sure
+	 * that we won't violate the locking order.
+	 * Any changes of dentry->d_parent must have
+	 * been done with parent->d_lock held, so
+	 * spin_lock() above is enough of a barrier
+	 * for checking if it's still our child.
+	 */
+	if (unlikely(parent != dentry->d_parent)) {
+		spin_unlock(&parent->d_lock);
+		goto again;
+	}
+	rcu_read_unlock();
+	if (parent != dentry)
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+	else
+		parent = NULL;
+	return parent;
 }
 
 /*
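lock_parent() exists because the dcache lock order is parent->d_lock before child->d_lock, yet its callers arrive holding only the child's lock. The fast path is a trylock; on failure it drops the child lock, takes the parent lock first, and revalidates that d_parent did not change in the unlocked window, with RCU keeping the old parent's memory alive meanwhile. The same drop-relock-revalidate shape in a hedged userspace sketch (pthread mutexes instead of spinlocks; it deliberately ignores the object-lifetime and memory-ordering issues the kernel covers with rcu_read_lock() and ACCESS_ONCE()):

#include <pthread.h>
#include <stddef.h>

struct node {
	pthread_mutex_t lock;
	struct node *parent;	/* only modified with parent->lock held */
};

/* Called with n->lock held; returns with parent->lock and n->lock held,
 * or NULL when n is a root. Illustrative only. */
static struct node *lock_parent_sketch(struct node *n)
{
	struct node *parent = n->parent;

	if (parent == NULL)
		return NULL;
	if (pthread_mutex_trylock(&parent->lock) == 0)
		return parent;			/* fast path, order preserved */

	pthread_mutex_unlock(&n->lock);		/* drop child lock first */
	for (;;) {
		parent = n->parent;		/* unlocked read, checked below */
		pthread_mutex_lock(&parent->lock);
		if (parent == n->parent)
			break;			/* still our parent: order now right */
		pthread_mutex_unlock(&parent->lock);	/* reparented: retry */
	}
	pthread_mutex_lock(&n->lock);
	return parent;
}

The final parent != dentry check in the real function covers a corner this sketch skips: by the time the locks are retaken, the dentry may have become a root (its own parent), in which case there is no separate parent lock to return.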
@@ -602,7 +612,7 @@ repeat:
 		return;
 
 kill_it:
-	dentry = dentry_kill(dentry, 1);
+	dentry = dentry_kill(dentry);
 	if (dentry)
 		goto repeat;
 }
@@ -815,64 +825,15 @@ restart:
 }
 EXPORT_SYMBOL(d_prune_aliases);
 
-/*
- * Try to throw away a dentry - free the inode, dput the parent.
- * Requires dentry->d_lock is held, and dentry->d_count == 0.
- * Releases dentry->d_lock.
- *
- * This may fail if locks cannot be acquired no problem, just try again.
- */
-static struct dentry * try_prune_one_dentry(struct dentry *dentry)
-	__releases(dentry->d_lock)
-{
-	struct dentry *parent;
-
-	parent = dentry_kill(dentry, 0);
-	/*
-	 * If dentry_kill returns NULL, we have nothing more to do.
-	 * if it returns the same dentry, trylocks failed. In either
-	 * case, just loop again.
-	 *
-	 * Otherwise, we need to prune ancestors too. This is necessary
-	 * to prevent quadratic behavior of shrink_dcache_parent(), but
-	 * is also expected to be beneficial in reducing dentry cache
-	 * fragmentation.
-	 */
-	if (!parent)
-		return NULL;
-	if (parent == dentry)
-		return dentry;
-
-	/* Prune ancestors. */
-	dentry = parent;
-	while (dentry) {
-		if (lockref_put_or_lock(&dentry->d_lockref))
-			return NULL;
-		dentry = dentry_kill(dentry, 1);
-	}
-	return NULL;
-}
-
 static void shrink_dentry_list(struct list_head *list)
 {
-	struct dentry *dentry;
-
-	rcu_read_lock();
-	for (;;) {
-		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
-		if (&dentry->d_lru == list)
-			break; /* empty */
+	struct dentry *dentry, *parent;
 
-		/*
-		 * Get the dentry lock, and re-verify that the dentry is
-		 * this on the shrinking list. If it is, we know that
-		 * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
-		 */
+	while (!list_empty(list)) {
+		struct inode *inode;
+		dentry = list_entry(list->prev, struct dentry, d_lru);
 		spin_lock(&dentry->d_lock);
-		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
-			spin_unlock(&dentry->d_lock);
-			continue;
-		}
+		parent = lock_parent(dentry);
 
 		/*
 		 * The dispose list is isolated and dentries are not accounted
@@ -885,30 +846,63 @@ static void shrink_dentry_list(struct list_head *list)
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup. Do not free it.
 		 */
-		if (dentry->d_lockref.count) {
+		if ((int)dentry->d_lockref.count > 0) {
 			spin_unlock(&dentry->d_lock);
+			if (parent)
+				spin_unlock(&parent->d_lock);
 			continue;
 		}
-		rcu_read_unlock();
 
-		/*
-		 * If 'try_to_prune()' returns a dentry, it will
-		 * be the same one we passed in, and d_lock will
-		 * have been held the whole time, so it will not
-		 * have been added to any other lists. We failed
-		 * to get the inode lock.
-		 *
-		 * We just add it back to the shrink list.
-		 */
-		dentry = try_prune_one_dentry(dentry);
 
-		rcu_read_lock();
-		if (dentry) {
+		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
+			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
+			spin_unlock(&dentry->d_lock);
+			if (parent)
+				spin_unlock(&parent->d_lock);
+			if (can_free)
+				dentry_free(dentry);
+			continue;
+		}
+
+		inode = dentry->d_inode;
+		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
 			d_shrink_add(dentry, list);
 			spin_unlock(&dentry->d_lock);
+			if (parent)
+				spin_unlock(&parent->d_lock);
+			continue;
+		}
+
+		__dentry_kill(dentry);
+
+		/*
+		 * We need to prune ancestors too. This is necessary to prevent
+		 * quadratic behavior of shrink_dcache_parent(), but is also
+		 * expected to be beneficial in reducing dentry cache
+		 * fragmentation.
+		 */
+		dentry = parent;
+		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
+			parent = lock_parent(dentry);
+			if (dentry->d_lockref.count != 1) {
+				dentry->d_lockref.count--;
+				spin_unlock(&dentry->d_lock);
+				if (parent)
+					spin_unlock(&parent->d_lock);
+				break;
+			}
+			inode = dentry->d_inode;	/* can't be NULL */
+			if (unlikely(!spin_trylock(&inode->i_lock))) {
+				spin_unlock(&dentry->d_lock);
+				if (parent)
+					spin_unlock(&parent->d_lock);
+				cpu_relax();
+				continue;
+			}
+			__dentry_kill(dentry);
+			dentry = parent;
 		}
 	}
-	rcu_read_unlock();
 }
 
 static enum lru_status
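The ancestor walk above is built on lockref_put_or_lock() (include/linux/lockref.h): it drops a reference without touching the spinlock when the count is greater than 1 and returns non-zero; otherwise it returns 0 with the spinlock held and the count still at 1, leaving the final put to the caller. A usage sketch of that contract with a hypothetical refcounted object (my reading of the callers here, not a verbatim kernel idiom):

#include <linux/lockref.h>

struct foo {
	struct lockref f_ref;
	/* ... payload ... */
};

static void foo_kill(struct foo *f);	/* hypothetical teardown: unlocks and frees */

static void foo_put(struct foo *f)
{
	if (lockref_put_or_lock(&f->f_ref))
		return;		/* count was > 1: dropped locklessly, done */
	/*
	 * Slow path: f->f_ref.lock is held and count is still 1, so we
	 * own the last reference and must tear the object down, as
	 * shrink_dentry_list() does via __dentry_kill().
	 */
	foo_kill(f);
}

Note also why the loop rechecks d_lockref.count != 1 after lock_parent(): lock_parent() may drop and retake dentry->d_lock, and in that window another CPU can take a reference, in which case this path must only decrement and stop.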
@@ -1261,34 +1255,23 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
 	if (data->start == dentry)
 		goto out;
 
-	/*
-	 * move only zero ref count dentries to the dispose list.
-	 *
-	 * Those which are presently on the shrink list, being processed
-	 * by shrink_dentry_list(), shouldn't be moved. Otherwise the
-	 * loop in shrink_dcache_parent() might not make any progress
-	 * and loop forever.
-	 */
-	if (dentry->d_lockref.count) {
-		dentry_lru_del(dentry);
-	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		/*
-		 * We can't use d_lru_shrink_move() because we
-		 * need to get the global LRU lock and do the
-		 * LRU accounting.
-		 */
-		d_lru_del(dentry);
-		d_shrink_add(dentry, &data->dispose);
+	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
 		data->found++;
-		ret = D_WALK_NORETRY;
+	} else {
+		if (dentry->d_flags & DCACHE_LRU_LIST)
+			d_lru_del(dentry);
+		if (!dentry->d_lockref.count) {
+			d_shrink_add(dentry, &data->dispose);
+			data->found++;
+		}
 	}
 	/*
 	 * We can return to the caller if we have found some (this
 	 * ensures forward progress). We'll be coming back to find
 	 * the rest.
 	 */
-	if (data->found && need_resched())
-		ret = D_WALK_QUIT;
+	if (!list_empty(&data->dispose))
+		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
 out:
 	return ret;
 }
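For context on the forward-progress comment: select_collect() feeds a collect-then-shrink loop in shrink_dcache_parent(), the same shape as the loop this diff deletes from shrink_dcache_for_umount() further down; schematically (reconstructed from that deleted loop, not quoted from the new tree):

	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;		/* nothing left to dispose of */

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}

Counting dentries that already sit on someone else's shrink list (the new data->found++ branch) keeps this loop iterating until those dentries have actually been freed, rather than returning while they are still in flight.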
@@ -1318,45 +1301,35 @@ void shrink_dcache_parent(struct dentry *parent)
 }
 EXPORT_SYMBOL(shrink_dcache_parent);
 
-static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
+static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
 {
-	struct select_data *data = _data;
-	enum d_walk_ret ret = D_WALK_CONTINUE;
+	/* it has busy descendents; complain about those instead */
+	if (!list_empty(&dentry->d_subdirs))
+		return D_WALK_CONTINUE;
 
-	if (dentry->d_lockref.count) {
-		dentry_lru_del(dentry);
-		if (likely(!list_empty(&dentry->d_subdirs)))
-			goto out;
-		if (dentry == data->start && dentry->d_lockref.count == 1)
-			goto out;
-		printk(KERN_ERR
-		       "BUG: Dentry %p{i=%lx,n=%s}"
-		       " still in use (%d)"
-		       " [unmount of %s %s]\n",
+	/* root with refcount 1 is fine */
+	if (dentry == _data && dentry->d_lockref.count == 1)
+		return D_WALK_CONTINUE;
+
+	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
+			" still in use (%d) [unmount of %s %s]\n",
 		       dentry,
 		       dentry->d_inode ?
 		       dentry->d_inode->i_ino : 0UL,
-		       dentry->d_name.name,
+		       dentry,
 		       dentry->d_lockref.count,
 		       dentry->d_sb->s_type->name,
 		       dentry->d_sb->s_id);
-		BUG();
-	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		/*
-		 * We can't use d_lru_shrink_move() because we
-		 * need to get the global LRU lock and do the
-		 * LRU accounting.
-		 */
-		if (dentry->d_flags & DCACHE_LRU_LIST)
-			d_lru_del(dentry);
-		d_shrink_add(dentry, &data->dispose);
-		data->found++;
-		ret = D_WALK_NORETRY;
-	}
-out:
-	if (data->found && need_resched())
-		ret = D_WALK_QUIT;
-	return ret;
+	WARN_ON(1);
+	return D_WALK_CONTINUE;
+}
+
+static void do_one_tree(struct dentry *dentry)
+{
+	shrink_dcache_parent(dentry);
+	d_walk(dentry, dentry, umount_check, NULL);
+	d_drop(dentry);
+	dput(dentry);
 }
 
 /*
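Aside from replacing BUG() with WARN_ON(1), the printk() above switches the name from a raw "%s" on d_name.name to the %pd format specifier, which takes the dentry pointer itself and prints its name:

	/* before: pass the name string */
	printk(KERN_ERR "dentry: %s\n", dentry->d_name.name);
	/* after: pass the dentry; vsprintf's %pd prints d_name for us */
	printk(KERN_ERR "dentry: %pd\n", dentry);

which is why the argument list now passes dentry where d_name.name used to be.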
@@ -1366,40 +1339,15 @@ void shrink_dcache_for_umount(struct super_block *sb)
 {
 	struct dentry *dentry;
 
-	if (down_read_trylock(&sb->s_umount))
-		BUG();
+	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
 
 	dentry = sb->s_root;
 	sb->s_root = NULL;
-	for (;;) {
-		struct select_data data;
-
-		INIT_LIST_HEAD(&data.dispose);
-		data.start = dentry;
-		data.found = 0;
-
-		d_walk(dentry, &data, umount_collect, NULL);
-		if (!data.found)
-			break;
-
-		shrink_dentry_list(&data.dispose);
-		cond_resched();
-	}
-	d_drop(dentry);
-	dput(dentry);
+	do_one_tree(dentry);
 
 	while (!hlist_bl_empty(&sb->s_anon)) {
-		struct select_data data;
-		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
-
-		INIT_LIST_HEAD(&data.dispose);
-		data.start = NULL;
-		data.found = 0;
-
-		d_walk(dentry, &data, umount_collect, NULL);
-		if (data.found)
-			shrink_dentry_list(&data.dispose);
-		cond_resched();
+		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
+		do_one_tree(dentry);
 	}
 }
 
@@ -1647,8 +1595,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 	unsigned add_flags = d_flags_for_inode(inode);
 
 	spin_lock(&dentry->d_lock);
-	dentry->d_flags &= ~DCACHE_ENTRY_TYPE;
-	dentry->d_flags |= add_flags;
+	__d_set_type(dentry, add_flags);
 	if (inode)
 		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
 	dentry->d_inode = inode;
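__d_set_type() itself is not shown in this diff; judging purely from the two lines it replaces, it is presumably a helper along these lines (an assumption based on the deleted lines, not the helper's actual body):

static void __d_set_type(struct dentry *dentry, unsigned type)
{
	/* replace the DCACHE_ENTRY_TYPE bits with those chosen by
	 * d_flags_for_inode() */
	dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type;
}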