Diffstat (limited to 'fs/dcache.c')
-rw-r--r--   fs/dcache.c   128
1 file changed, 101 insertions(+), 27 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 1bd4614ce93b..41000305d716 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -357,15 +357,80 @@ static void dentry_unlink_inode(struct dentry * dentry)
 }
 
 /*
+ * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
+ * is in use - which includes both the "real" per-superblock
+ * LRU list _and_ the DCACHE_SHRINK_LIST use.
+ *
+ * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
+ * on the shrink list (ie not on the superblock LRU list).
+ *
+ * The per-cpu "nr_dentry_unused" counters are updated with
+ * the DCACHE_LRU_LIST bit.
+ *
+ * These helper functions make sure we always follow the
+ * rules. d_lock must be held by the caller.
+ */
+#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
+static void d_lru_add(struct dentry *dentry)
+{
+	D_FLAG_VERIFY(dentry, 0);
+	dentry->d_flags |= DCACHE_LRU_LIST;
+	this_cpu_inc(nr_dentry_unused);
+	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+}
+
+static void d_lru_del(struct dentry *dentry)
+{
+	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
+	dentry->d_flags &= ~DCACHE_LRU_LIST;
+	this_cpu_dec(nr_dentry_unused);
+	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+}
+
+static void d_shrink_del(struct dentry *dentry)
+{
+	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
+	list_del_init(&dentry->d_lru);
+	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
+	this_cpu_dec(nr_dentry_unused);
+}
+
+static void d_shrink_add(struct dentry *dentry, struct list_head *list)
+{
+	D_FLAG_VERIFY(dentry, 0);
+	list_add(&dentry->d_lru, list);
+	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
+	this_cpu_inc(nr_dentry_unused);
+}
+
+/*
+ * These can only be called under the global LRU lock, ie during the
+ * callback for freeing the LRU list. "isolate" removes it from the
+ * LRU lists entirely, while shrink_move moves it to the indicated
+ * private list.
+ */
+static void d_lru_isolate(struct dentry *dentry)
+{
+	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
+	dentry->d_flags &= ~DCACHE_LRU_LIST;
+	this_cpu_dec(nr_dentry_unused);
+	list_del_init(&dentry->d_lru);
+}
+
+static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
+{
+	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
+	dentry->d_flags |= DCACHE_SHRINK_LIST;
+	list_move_tail(&dentry->d_lru, list);
+}
+
+/*
  * dentry_lru_(add|del)_list) must be called with d_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
-	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
-		if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
-			this_cpu_inc(nr_dentry_unused);
-		dentry->d_flags |= DCACHE_LRU_LIST;
-	}
+	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
+		d_lru_add(dentry);
 }
 
 /*
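The comment block and helpers added above amount to a small state machine over two d_flags bits plus the nr_dentry_unused counter. A minimal userspace model of those rules, useful for seeing the legal transitions at a glance (illustrative only: the flag values are placeholders, a plain counter stands in for the per-cpu one, and bare functions stand in for the list_lru calls; none of this is kernel API):

/* Minimal model of the d_flags/nr_dentry_unused rules above.
 * Illustrative only: placeholder flag values, a plain counter instead of
 * the per-cpu one, and no real lists.  Build with: cc -o model model.c
 */
#include <assert.h>
#include <stdio.h>

#define DCACHE_LRU_LIST		0x1	/* placeholder values, not the kernel's */
#define DCACHE_SHRINK_LIST	0x2

static unsigned int d_flags;		/* stands in for dentry->d_flags */
static long nr_dentry_unused;		/* stands in for the per-cpu counter */

#define D_FLAG_VERIFY(x) \
	assert((d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) == (x))

static void lru_add(void)		/* models d_lru_add() */
{
	D_FLAG_VERIFY(0);
	d_flags |= DCACHE_LRU_LIST;
	nr_dentry_unused++;		/* joins the per-sb LRU list */
}

static void lru_del(void)		/* models d_lru_del() */
{
	D_FLAG_VERIFY(DCACHE_LRU_LIST);
	d_flags &= ~DCACHE_LRU_LIST;
	nr_dentry_unused--;		/* leaves the per-sb LRU list */
}

static void lru_isolate(void)		/* models d_lru_isolate() */
{
	D_FLAG_VERIFY(DCACHE_LRU_LIST);
	d_flags &= ~DCACHE_LRU_LIST;
	nr_dentry_unused--;		/* dropped by the LRU walker itself */
}

static void lru_shrink_move(void)	/* models d_lru_shrink_move() */
{
	D_FLAG_VERIFY(DCACHE_LRU_LIST);
	d_flags |= DCACHE_SHRINK_LIST;	/* still "unused": counter unchanged */
}

static void shrink_add(void)		/* models d_shrink_add() */
{
	D_FLAG_VERIFY(0);
	d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	nr_dentry_unused++;
}

static void shrink_del(void)		/* models d_shrink_del() */
{
	D_FLAG_VERIFY(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	nr_dentry_unused--;
}

int main(void)
{
	lru_add(); lru_del();				/* onto and back off the per-sb LRU */
	lru_add(); lru_isolate();			/* LRU walker drops a referenced dentry */
	lru_add(); lru_shrink_move(); shrink_del();	/* LRU -> private shrink list -> gone */
	shrink_add(); shrink_del();			/* put straight onto a shrink list */

	assert(d_flags == 0 && nr_dentry_unused == 0);
	printf("flag/counter invariants hold\n");
	return 0;
}

One thing the model makes explicit: d_lru_shrink_move() is the only helper that leaves nr_dentry_unused untouched, because the dentry stays "unused" while it migrates from the per-superblock LRU to a private shrink list; every other helper pairs its flag change with a counter update.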
@@ -377,15 +442,11 @@ static void dentry_lru_add(struct dentry *dentry)
  */
 static void dentry_lru_del(struct dentry *dentry)
 {
-	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
-		list_del_init(&dentry->d_lru);
-		dentry->d_flags &= ~DCACHE_SHRINK_LIST;
-		return;
-	}
-
-	if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
-		this_cpu_dec(nr_dentry_unused);
-	dentry->d_flags &= ~DCACHE_LRU_LIST;
+	if (dentry->d_flags & DCACHE_LRU_LIST) {
+		if (dentry->d_flags & DCACHE_SHRINK_LIST)
+			return d_shrink_del(dentry);
+		d_lru_del(dentry);
+	}
 }
 
 /**
@@ -837,6 +898,12 @@ static void shrink_dentry_list(struct list_head *list)
 		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
 		if (&dentry->d_lru == list)
 			break; /* empty */
+
+		/*
+		 * Get the dentry lock, and re-verify that the dentry is
+		 * still on the shrink list. If it is, we know that
+		 * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
+		 */
 		spin_lock(&dentry->d_lock);
 		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
 			spin_unlock(&dentry->d_lock);
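The new comment in this hunk describes the usual "peek without the lock, take the lock, then re-verify" pattern: the tail of the shrink list is read before d_lock is held, so its membership must be confirmed once the lock is taken. A compact userspace sketch of just that control flow (hypothetical names, and the racing remover is simulated inline rather than running on another CPU):

/* Sketch of the "peek, lock, re-verify" pattern described above.
 * Not kernel code: plain pointers, no real locks, hypothetical names.
 */
#include <assert.h>
#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_front(struct node *head, struct node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_unlink(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

int main(void)
{
	struct node head, a, b;

	list_init(&head);
	list_add_front(&head, &a);	/* a becomes the tail (head.prev) */
	list_add_front(&head, &b);

	/* 1. Peek at the tail without holding its lock. */
	struct node *victim = head.prev;
	assert(victim == &a);

	/* 2. Before we get the lock, a concurrent path pulls that same
	 *    entry off the list (simulated inline here). */
	list_unlink(&a);

	/* 3. Lock acquired (elided): re-verify membership before using it.
	 *    The kernel loop drops the lock and goes around again; the
	 *    sketch just picks up the new tail. */
	if (victim != head.prev)
		victim = head.prev;
	assert(victim == &b);

	list_unlink(victim);		/* safe: verified still on the list */
	printf("re-verified the tail before unlinking\n");
	return 0;
}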
@@ -848,8 +915,7 @@ static void shrink_dentry_list(struct list_head *list)
 		 * to the LRU here, so we can simply remove it from the list
 		 * here regardless of whether it is referenced or not.
 		 */
-		list_del_init(&dentry->d_lru);
-		dentry->d_flags &= ~DCACHE_SHRINK_LIST;
+		d_shrink_del(dentry);
 
 		/*
 		 * We found an inuse dentry which was not removed from
@@ -861,12 +927,20 @@ static void shrink_dentry_list(struct list_head *list)
 		}
 		rcu_read_unlock();
 
+		/*
+		 * If 'try_to_prune()' returns a dentry, it will
+		 * be the same one we passed in, and d_lock will
+		 * have been held the whole time, so it will not
+		 * have been added to any other lists. We failed
+		 * to get the inode lock.
+		 *
+		 * We just add it back to the shrink list.
+		 */
 		dentry = try_prune_one_dentry(dentry);
 
 		rcu_read_lock();
 		if (dentry) {
-			dentry->d_flags |= DCACHE_SHRINK_LIST;
-			list_add(&dentry->d_lru, list);
+			d_shrink_add(dentry, list);
 			spin_unlock(&dentry->d_lock);
 		}
 	}
@@ -894,7 +968,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
 	 * another pass through the LRU.
 	 */
 	if (dentry->d_lockref.count) {
-		list_del_init(&dentry->d_lru);
+		d_lru_isolate(dentry);
 		spin_unlock(&dentry->d_lock);
 		return LRU_REMOVED;
 	}
@@ -925,9 +999,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
 		return LRU_ROTATE;
 	}
 
-	dentry->d_flags |= DCACHE_SHRINK_LIST;
-	list_move_tail(&dentry->d_lru, freeable);
-	this_cpu_dec(nr_dentry_unused);
+	d_lru_shrink_move(dentry, freeable);
 	spin_unlock(&dentry->d_lock);
 
 	return LRU_REMOVED;
@@ -972,9 +1044,7 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
 	if (!spin_trylock(&dentry->d_lock))
 		return LRU_SKIP;
 
-	dentry->d_flags |= DCACHE_SHRINK_LIST;
-	list_move_tail(&dentry->d_lru, freeable);
-	this_cpu_dec(nr_dentry_unused);
+	d_lru_shrink_move(dentry, freeable);
 	spin_unlock(&dentry->d_lock);
 
 	return LRU_REMOVED;
@@ -1362,9 +1432,13 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
 	if (dentry->d_lockref.count) {
 		dentry_lru_del(dentry);
 	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		dentry_lru_del(dentry);
-		list_add_tail(&dentry->d_lru, &data->dispose);
-		dentry->d_flags |= DCACHE_SHRINK_LIST;
+		/*
+		 * We can't use d_lru_shrink_move() because we
+		 * need to get the global LRU lock and do the
+		 * LRU accounting.
+		 */
+		d_lru_del(dentry);
+		d_shrink_add(dentry, &data->dispose);
 		data->found++;
 		ret = D_WALK_NORETRY;
 	}
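The comment about needing "the global LRU lock and ... the LRU accounting" is worth unpacking: d_lru_del() followed by d_shrink_add() ends in the same flag state that d_lru_shrink_move() produces, and the nr_dentry_unused accounting nets out; what differs is that d_lru_del() goes through list_lru_del(), which can be called from here, whereas d_lru_shrink_move() is documented above as only callable while the LRU walk already holds the global LRU lock. A tiny standalone check of that end-state equivalence (same placeholder flag values and plain counter as the earlier sketch, not kernel code):

#include <assert.h>
#include <stdio.h>

#define LRU_LIST	0x1	/* placeholder stand-ins for the DCACHE_* bits */
#define SHRINK_LIST	0x2

int main(void)
{
	/* Path used by select_collect(): d_lru_del() + d_shrink_add() */
	unsigned int flags_a = LRU_LIST;	/* starts on the per-sb LRU */
	long unused_a = 1;
	flags_a &= ~LRU_LIST;			/* d_lru_del(): off the LRU */
	unused_a--;
	flags_a |= SHRINK_LIST | LRU_LIST;	/* d_shrink_add(): onto dispose */
	unused_a++;

	/* Path used by the LRU walkers: d_lru_shrink_move() */
	unsigned int flags_b = LRU_LIST;
	long unused_b = 1;
	flags_b |= SHRINK_LIST;			/* counter deliberately untouched */

	/* Same end state either way; only the locking context differs. */
	assert(flags_a == flags_b);
	assert(unused_a == unused_b);
	printf("both paths end with LRU_LIST|SHRINK_LIST and an unchanged counter\n");
	return 0;
}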