Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c | 318
1 file changed, 102 insertions(+), 216 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 40707d88a945..42ae01eefc07 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -246,16 +246,8 @@ static void __d_free(struct rcu_head *head)
 	kmem_cache_free(dentry_cache, dentry);
 }
 
-/*
- * no locks, please.
- */
-static void d_free(struct dentry *dentry)
+static void dentry_free(struct dentry *dentry)
 {
-	BUG_ON((int)dentry->d_lockref.count > 0);
-	this_cpu_dec(nr_dentry);
-	if (dentry->d_op && dentry->d_op->d_release)
-		dentry->d_op->d_release(dentry);
-
 	/* if dentry was never visible to RCU, immediate free is OK */
 	if (!(dentry->d_flags & DCACHE_RCUACCESS))
 		__d_free(&dentry->d_u.d_rcu);
@@ -403,56 +395,6 @@ static void dentry_lru_add(struct dentry *dentry)
 		d_lru_add(dentry);
 }
 
-/*
- * Remove a dentry with references from the LRU.
- *
- * If we are on the shrink list, then we can get to try_prune_one_dentry() and
- * lose our last reference through the parent walk. In this case, we need to
- * remove ourselves from the shrink list, not the LRU.
- */
-static void dentry_lru_del(struct dentry *dentry)
-{
-	if (dentry->d_flags & DCACHE_LRU_LIST) {
-		if (dentry->d_flags & DCACHE_SHRINK_LIST)
-			return d_shrink_del(dentry);
-		d_lru_del(dentry);
-	}
-}
-
-/**
- * d_kill - kill dentry and return parent
- * @dentry: dentry to kill
- * @parent: parent dentry
- *
- * The dentry must already be unhashed and removed from the LRU.
- *
- * If this is the root of the dentry tree, return NULL.
- *
- * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
- * d_kill.
- */
-static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
-	__releases(dentry->d_lock)
-	__releases(parent->d_lock)
-	__releases(dentry->d_inode->i_lock)
-{
-	list_del(&dentry->d_u.d_child);
-	/*
-	 * Inform d_walk() that we are no longer attached to the
-	 * dentry tree
-	 */
-	dentry->d_flags |= DCACHE_DENTRY_KILLED;
-	if (parent)
-		spin_unlock(&parent->d_lock);
-	dentry_iput(dentry);
-	/*
-	 * dentry_iput drops the locks, at which point nobody (except
-	 * transient RCU lookups) can reach this dentry.
-	 */
-	d_free(dentry);
-	return parent;
-}
-
 /**
  * d_drop - drop a dentry
  * @dentry: dentry to drop
@@ -510,7 +452,14 @@ dentry_kill(struct dentry *dentry, int unlock_on_failure)
 	__releases(dentry->d_lock)
 {
 	struct inode *inode;
-	struct dentry *parent;
+	struct dentry *parent = NULL;
+	bool can_free = true;
+
+	if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
+		can_free = dentry->d_flags & DCACHE_MAY_FREE;
+		spin_unlock(&dentry->d_lock);
+		goto out;
+	}
 
 	inode = dentry->d_inode;
 	if (inode && !spin_trylock(&inode->i_lock)) {
@@ -521,9 +470,7 @@ relock:
 		}
 		return dentry; /* try again with same dentry */
 	}
-	if (IS_ROOT(dentry))
-		parent = NULL;
-	else
+	if (!IS_ROOT(dentry))
 		parent = dentry->d_parent;
 	if (parent && !spin_trylock(&parent->d_lock)) {
 		if (inode)
@@ -543,10 +490,40 @@ relock:
 	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
 		dentry->d_op->d_prune(dentry);
 
-	dentry_lru_del(dentry);
+	if (dentry->d_flags & DCACHE_LRU_LIST) {
+		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
+			d_lru_del(dentry);
+	}
 	/* if it was on the hash then remove it */
 	__d_drop(dentry);
-	return d_kill(dentry, parent);
+	list_del(&dentry->d_u.d_child);
+	/*
+	 * Inform d_walk() that we are no longer attached to the
+	 * dentry tree
+	 */
+	dentry->d_flags |= DCACHE_DENTRY_KILLED;
+	if (parent)
+		spin_unlock(&parent->d_lock);
+	dentry_iput(dentry);
+	/*
+	 * dentry_iput drops the locks, at which point nobody (except
+	 * transient RCU lookups) can reach this dentry.
+	 */
+	BUG_ON((int)dentry->d_lockref.count > 0);
+	this_cpu_dec(nr_dentry);
+	if (dentry->d_op && dentry->d_op->d_release)
+		dentry->d_op->d_release(dentry);
+
+	spin_lock(&dentry->d_lock);
+	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+		dentry->d_flags |= DCACHE_MAY_FREE;
+		can_free = false;
+	}
+	spin_unlock(&dentry->d_lock);
+out:
+	if (likely(can_free))
+		dentry_free(dentry);
+	return parent;
 }
 
 /*
@@ -815,65 +792,13 @@ restart:
 }
 EXPORT_SYMBOL(d_prune_aliases);
 
-/*
- * Try to throw away a dentry - free the inode, dput the parent.
- * Requires dentry->d_lock is held, and dentry->d_count == 0.
- * Releases dentry->d_lock.
- *
- * This may fail if locks cannot be acquired no problem, just try again.
- */
-static struct dentry * try_prune_one_dentry(struct dentry *dentry)
-	__releases(dentry->d_lock)
-{
-	struct dentry *parent;
-
-	parent = dentry_kill(dentry, 0);
-	/*
-	 * If dentry_kill returns NULL, we have nothing more to do.
-	 * if it returns the same dentry, trylocks failed. In either
-	 * case, just loop again.
-	 *
-	 * Otherwise, we need to prune ancestors too. This is necessary
-	 * to prevent quadratic behavior of shrink_dcache_parent(), but
-	 * is also expected to be beneficial in reducing dentry cache
-	 * fragmentation.
-	 */
-	if (!parent)
-		return NULL;
-	if (parent == dentry)
-		return dentry;
-
-	/* Prune ancestors. */
-	dentry = parent;
-	while (dentry) {
-		if (lockref_put_or_lock(&dentry->d_lockref))
-			return NULL;
-		dentry = dentry_kill(dentry, 1);
-	}
-	return NULL;
-}
-
 static void shrink_dentry_list(struct list_head *list)
 {
-	struct dentry *dentry;
+	struct dentry *dentry, *parent;
 
-	rcu_read_lock();
-	for (;;) {
-		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
-		if (&dentry->d_lru == list)
-			break; /* empty */
-
-		/*
-		 * Get the dentry lock, and re-verify that the dentry is
-		 * this on the shrinking list. If it is, we know that
-		 * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
-		 */
+	while (!list_empty(list)) {
+		dentry = list_entry(list->prev, struct dentry, d_lru);
 		spin_lock(&dentry->d_lock);
-		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
-			spin_unlock(&dentry->d_lock);
-			continue;
-		}
-
 		/*
 		 * The dispose list is isolated and dentries are not accounted
 		 * to the LRU here, so we can simply remove it from the list
@@ -885,30 +810,38 @@ static void shrink_dentry_list(struct list_head *list)
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup. Do not free it.
 		 */
-		if (dentry->d_lockref.count) {
+		if ((int)dentry->d_lockref.count > 0) {
 			spin_unlock(&dentry->d_lock);
 			continue;
 		}
-		rcu_read_unlock();
 
+		parent = dentry_kill(dentry, 0);
 		/*
-		 * If 'try_to_prune()' returns a dentry, it will
-		 * be the same one we passed in, and d_lock will
-		 * have been held the whole time, so it will not
-		 * have been added to any other lists. We failed
-		 * to get the inode lock.
-		 *
-		 * We just add it back to the shrink list.
+		 * If dentry_kill returns NULL, we have nothing more to do.
 		 */
-		dentry = try_prune_one_dentry(dentry);
+		if (!parent)
+			continue;
 
-		rcu_read_lock();
-		if (dentry) {
+		if (unlikely(parent == dentry)) {
+			/*
+			 * trylocks have failed and d_lock has been held the
+			 * whole time, so it could not have been added to any
+			 * other lists. Just add it back to the shrink list.
+			 */
 			d_shrink_add(dentry, list);
 			spin_unlock(&dentry->d_lock);
+			continue;
 		}
+		/*
+		 * We need to prune ancestors too. This is necessary to prevent
+		 * quadratic behavior of shrink_dcache_parent(), but is also
+		 * expected to be beneficial in reducing dentry cache
+		 * fragmentation.
+		 */
+		dentry = parent;
+		while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
+			dentry = dentry_kill(dentry, 1);
 	}
-	rcu_read_unlock();
 }
 
 static enum lru_status
@@ -1261,34 +1194,23 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
 	if (data->start == dentry)
 		goto out;
 
-	/*
-	 * move only zero ref count dentries to the dispose list.
-	 *
-	 * Those which are presently on the shrink list, being processed
-	 * by shrink_dentry_list(), shouldn't be moved. Otherwise the
-	 * loop in shrink_dcache_parent() might not make any progress
-	 * and loop forever.
-	 */
-	if (dentry->d_lockref.count) {
-		dentry_lru_del(dentry);
-	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		/*
-		 * We can't use d_lru_shrink_move() because we
-		 * need to get the global LRU lock and do the
-		 * LRU accounting.
-		 */
-		d_lru_del(dentry);
-		d_shrink_add(dentry, &data->dispose);
+	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
 		data->found++;
-		ret = D_WALK_NORETRY;
+	} else {
+		if (dentry->d_flags & DCACHE_LRU_LIST)
+			d_lru_del(dentry);
+		if (!dentry->d_lockref.count) {
+			d_shrink_add(dentry, &data->dispose);
+			data->found++;
+		}
 	}
 	/*
 	 * We can return to the caller if we have found some (this
 	 * ensures forward progress). We'll be coming back to find
 	 * the rest.
 	 */
-	if (data->found && need_resched())
-		ret = D_WALK_QUIT;
+	if (!list_empty(&data->dispose))
+		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
 out:
 	return ret;
 }
@@ -1318,45 +1240,35 @@ void shrink_dcache_parent(struct dentry *parent)
 }
 EXPORT_SYMBOL(shrink_dcache_parent);
 
-static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
+static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
 {
-	struct select_data *data = _data;
-	enum d_walk_ret ret = D_WALK_CONTINUE;
+	/* it has busy descendents; complain about those instead */
+	if (!list_empty(&dentry->d_subdirs))
+		return D_WALK_CONTINUE;
 
-	if (dentry->d_lockref.count) {
-		dentry_lru_del(dentry);
-		if (likely(!list_empty(&dentry->d_subdirs)))
-			goto out;
-		if (dentry == data->start && dentry->d_lockref.count == 1)
-			goto out;
-		printk(KERN_ERR
-		       "BUG: Dentry %p{i=%lx,n=%s}"
-		       " still in use (%d)"
-		       " [unmount of %s %s]\n",
+	/* root with refcount 1 is fine */
+	if (dentry == _data && dentry->d_lockref.count == 1)
+		return D_WALK_CONTINUE;
+
+	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
+			" still in use (%d) [unmount of %s %s]\n",
 		       dentry,
 		       dentry->d_inode ?
 		       dentry->d_inode->i_ino : 0UL,
-		       dentry->d_name.name,
+		       dentry,
 		       dentry->d_lockref.count,
 		       dentry->d_sb->s_type->name,
 		       dentry->d_sb->s_id);
-		BUG();
-	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		/*
-		 * We can't use d_lru_shrink_move() because we
-		 * need to get the global LRU lock and do the
-		 * LRU accounting.
-		 */
-		if (dentry->d_flags & DCACHE_LRU_LIST)
-			d_lru_del(dentry);
-		d_shrink_add(dentry, &data->dispose);
-		data->found++;
-		ret = D_WALK_NORETRY;
-	}
-out:
-	if (data->found && need_resched())
-		ret = D_WALK_QUIT;
-	return ret;
+	WARN_ON(1);
+	return D_WALK_CONTINUE;
+}
+
+static void do_one_tree(struct dentry *dentry)
+{
+	shrink_dcache_parent(dentry);
+	d_walk(dentry, dentry, umount_check, NULL);
+	d_drop(dentry);
+	dput(dentry);
 }
 
 /*
@@ -1366,40 +1278,15 @@ void shrink_dcache_for_umount(struct super_block *sb)
 {
 	struct dentry *dentry;
 
-	if (down_read_trylock(&sb->s_umount))
-		BUG();
+	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
 
 	dentry = sb->s_root;
 	sb->s_root = NULL;
-	for (;;) {
-		struct select_data data;
-
-		INIT_LIST_HEAD(&data.dispose);
-		data.start = dentry;
-		data.found = 0;
-
-		d_walk(dentry, &data, umount_collect, NULL);
-		if (!data.found)
-			break;
-
-		shrink_dentry_list(&data.dispose);
-		cond_resched();
-	}
-	d_drop(dentry);
-	dput(dentry);
+	do_one_tree(dentry);
 
 	while (!hlist_bl_empty(&sb->s_anon)) {
-		struct select_data data;
-		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
-
-		INIT_LIST_HEAD(&data.dispose);
-		data.start = NULL;
-		data.found = 0;
-
-		d_walk(dentry, &data, umount_collect, NULL);
-		if (data.found)
-			shrink_dentry_list(&data.dispose);
-		cond_resched();
+		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
+		do_one_tree(dentry);
 	}
 }
 
@@ -1647,8 +1534,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 	unsigned add_flags = d_flags_for_inode(inode);
 
 	spin_lock(&dentry->d_lock);
-	dentry->d_flags &= ~DCACHE_ENTRY_TYPE;
-	dentry->d_flags |= add_flags;
+	__d_set_type(dentry, add_flags);
 	if (inode)
 		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
 	dentry->d_inode = inode;