author     Nick Piggin <npiggin@kernel.dk>    2011-01-07 01:49:32 -0500
committer  Nick Piggin <npiggin@kernel.dk>    2011-01-07 01:50:21 -0500
commit     b7ab39f631f505edc2bbdb86620d5493f995c9da (patch)
tree       62be97ebc7fc69ceb601f23312d335ebb8038ee7 /fs/dcache.c
parent     2304450783dfde7b0b94ae234edd0dbffa865073 (diff)
fs: dcache scale dentry refcount
Make d_count non-atomic and protect it with d_lock. This allows us to ensure a
0 refcount dentry remains 0 without dcache_lock. It is also fairly natural when
we start protecting many other dentry members with d_lock.
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
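For readers less familiar with the dcache, the locking scheme this patch moves to can be sketched in user space roughly as follows. This is only an illustration of the pattern (a plain counter guarded by the object's own lock, so that a count which has reached zero can never be raised again); the names struct obj, obj_get() and obj_put() are invented, and a pthread mutex stands in for d_lock:

/*
 * Illustrative sketch only, not kernel code. A plain int refcount is
 * protected by the object's own lock instead of being an atomic_t.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
        pthread_mutex_t lock;   /* plays the role of dentry->d_lock */
        int count;              /* plays the role of dentry->d_count */
};

/*
 * Take a new reference. Callers must already hold a reference of their
 * own, or some outer lock that keeps the object findable (in the dcache
 * that is what dcache_lock/d_lock provide around hash lookups).
 */
static bool obj_get(struct obj *o)
{
        bool alive;

        pthread_mutex_lock(&o->lock);
        alive = o->count > 0;   /* a count that hit zero must stay zero */
        if (alive)
                o->count++;
        pthread_mutex_unlock(&o->lock);
        return alive;
}

/* Drop a reference; the thread that drops the last one frees the object. */
static void obj_put(struct obj *o)
{
        bool last;

        pthread_mutex_lock(&o->lock);
        last = (--o->count == 0);
        pthread_mutex_unlock(&o->lock);
        if (last) {
                pthread_mutex_destroy(&o->lock);
                free(o);
        }
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        pthread_mutex_init(&o->lock, NULL);
        o->count = 1;           /* initial reference, as d_alloc() sets d_count = 1 */
        if (obj_get(o))         /* second reference */
                obj_put(o);
        obj_put(o);             /* final put frees the object */
        return 0;
}

The point of folding the count under the per-object lock is that "is the count zero?" and the action taken on the answer happen under the same lock, which an atomic counter by itself cannot guarantee.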
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  106
1 files changed, 82 insertions, 24 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 3d3c843c36ed..81e91502b294 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -45,6 +45,7 @@
  * - d_flags
  * - d_name
  * - d_lru
+ * - d_count
  *
  * Ordering:
  * dcache_lock
@@ -125,6 +126,7 @@ static void __d_free(struct rcu_head *head)
  */
 static void d_free(struct dentry *dentry)
 {
+        BUG_ON(dentry->d_count);
         this_cpu_dec(nr_dentry);
         if (dentry->d_op && dentry->d_op->d_release)
                 dentry->d_op->d_release(dentry);
@@ -222,8 +224,11 @@ static struct dentry *d_kill(struct dentry *dentry)
         struct dentry *parent;

         list_del(&dentry->d_u.d_child);
-        /*drops the locks, at that point nobody can reach this dentry */
         dentry_iput(dentry);
+        /*
+         * dentry_iput drops the locks, at which point nobody (except
+         * transient RCU lookups) can reach this dentry.
+         */
         if (IS_ROOT(dentry))
                 parent = NULL;
         else
@@ -303,13 +308,23 @@ void dput(struct dentry *dentry)
                 return;

 repeat:
-        if (atomic_read(&dentry->d_count) == 1)
+        if (dentry->d_count == 1)
                 might_sleep();
-        if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
-                return;
-
         spin_lock(&dentry->d_lock);
-        if (atomic_read(&dentry->d_count)) {
+        if (dentry->d_count == 1) {
+                if (!spin_trylock(&dcache_lock)) {
+                        /*
+                         * Something of a livelock possibility we could avoid
+                         * by taking dcache_lock and trying again, but we
+                         * want to reduce dcache_lock anyway so this will
+                         * get improved.
+                         */
+                        spin_unlock(&dentry->d_lock);
+                        goto repeat;
+                }
+        }
+        dentry->d_count--;
+        if (dentry->d_count) {
                 spin_unlock(&dentry->d_lock);
                 spin_unlock(&dcache_lock);
                 return;
@@ -389,7 +404,7 @@ int d_invalidate(struct dentry * dentry)
          * working directory or similar).
          */
         spin_lock(&dentry->d_lock);
-        if (atomic_read(&dentry->d_count) > 1) {
+        if (dentry->d_count > 1) {
                 if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
                         spin_unlock(&dentry->d_lock);
                         spin_unlock(&dcache_lock);
@@ -404,29 +419,61 @@ int d_invalidate(struct dentry * dentry)
 }
 EXPORT_SYMBOL(d_invalidate);

-/* This should be called _only_ with dcache_lock held */
+/* This must be called with dcache_lock and d_lock held */
 static inline struct dentry * __dget_locked_dlock(struct dentry *dentry)
 {
-        atomic_inc(&dentry->d_count);
+        dentry->d_count++;
         dentry_lru_del(dentry);
         return dentry;
 }

+/* This should be called _only_ with dcache_lock held */
 static inline struct dentry * __dget_locked(struct dentry *dentry)
 {
-        atomic_inc(&dentry->d_count);
         spin_lock(&dentry->d_lock);
-        dentry_lru_del(dentry);
+        __dget_locked_dlock(dentry);
         spin_unlock(&dentry->d_lock);
         return dentry;
 }

+struct dentry * dget_locked_dlock(struct dentry *dentry)
+{
+        return __dget_locked_dlock(dentry);
+}
+
 struct dentry * dget_locked(struct dentry *dentry)
 {
         return __dget_locked(dentry);
 }
 EXPORT_SYMBOL(dget_locked);

+struct dentry *dget_parent(struct dentry *dentry)
+{
+        struct dentry *ret;
+
+repeat:
+        spin_lock(&dentry->d_lock);
+        ret = dentry->d_parent;
+        if (!ret)
+                goto out;
+        if (dentry == ret) {
+                ret->d_count++;
+                goto out;
+        }
+        if (!spin_trylock(&ret->d_lock)) {
+                spin_unlock(&dentry->d_lock);
+                cpu_relax();
+                goto repeat;
+        }
+        BUG_ON(!ret->d_count);
+        ret->d_count++;
+        spin_unlock(&ret->d_lock);
+out:
+        spin_unlock(&dentry->d_lock);
+        return ret;
+}
+EXPORT_SYMBOL(dget_parent);
+
 /**
  * d_find_alias - grab a hashed alias of inode
  * @inode: inode in question
@@ -495,7 +542,7 @@ restart:
         spin_lock(&dcache_lock);
         list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                 spin_lock(&dentry->d_lock);
-                if (!atomic_read(&dentry->d_count)) {
+                if (!dentry->d_count) {
                         __dget_locked_dlock(dentry);
                         __d_drop(dentry);
                         spin_unlock(&dentry->d_lock);
@@ -530,7 +577,10 @@ static void prune_one_dentry(struct dentry * dentry)
          */
         while (dentry) {
                 spin_lock(&dcache_lock);
-                if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock)) {
+                spin_lock(&dentry->d_lock);
+                dentry->d_count--;
+                if (dentry->d_count) {
+                        spin_unlock(&dentry->d_lock);
                         spin_unlock(&dcache_lock);
                         return;
                 }
@@ -562,7 +612,7 @@ static void shrink_dentry_list(struct list_head *list)
                  * the LRU because of laziness during lookup. Do not free
                  * it - just keep it off the LRU list.
                  */
-                if (atomic_read(&dentry->d_count)) {
+                if (dentry->d_count) {
                         spin_unlock(&dentry->d_lock);
                         continue;
                 }
@@ -783,7 +833,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
         do {
                 struct inode *inode;

-                if (atomic_read(&dentry->d_count) != 0) {
+                if (dentry->d_count != 0) {
                         printk(KERN_ERR
                                "BUG: Dentry %p{i=%lx,n=%s}"
                                " still in use (%d)"
@@ -792,7 +842,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                                dentry->d_inode ?
                                dentry->d_inode->i_ino : 0UL,
                                dentry->d_name.name,
-                               atomic_read(&dentry->d_count),
+                               dentry->d_count,
                                dentry->d_sb->s_type->name,
                                dentry->d_sb->s_id);
                         BUG();
@@ -802,7 +852,9 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                         parent = NULL;
                 else {
                         parent = dentry->d_parent;
-                        atomic_dec(&parent->d_count);
+                        spin_lock(&parent->d_lock);
+                        parent->d_count--;
+                        spin_unlock(&parent->d_lock);
                 }

                 list_del(&dentry->d_u.d_child);
@@ -853,7 +905,9 @@ void shrink_dcache_for_umount(struct super_block *sb)

         dentry = sb->s_root;
         sb->s_root = NULL;
-        atomic_dec(&dentry->d_count);
+        spin_lock(&dentry->d_lock);
+        dentry->d_count--;
+        spin_unlock(&dentry->d_lock);
         shrink_dcache_for_umount_subtree(dentry);

         while (!hlist_empty(&sb->s_anon)) {
@@ -950,7 +1004,7 @@ resume:
                  * move only zero ref count dentries to the end
                  * of the unused list for prune_dcache
                  */
-                if (!atomic_read(&dentry->d_count)) {
+                if (!dentry->d_count) {
                         dentry_lru_move_tail(dentry);
                         found++;
                 } else {
@@ -1068,7 +1122,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
         memcpy(dname, name->name, name->len);
         dname[name->len] = 0;

-        atomic_set(&dentry->d_count, 1);
+        dentry->d_count = 1;
         dentry->d_flags = DCACHE_UNHASHED;
         spin_lock_init(&dentry->d_lock);
         dentry->d_inode = NULL;
@@ -1556,7 +1610,7 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
                         goto next;
                 }

-                atomic_inc(&dentry->d_count);
+                dentry->d_count++;
                 found = dentry;
                 spin_unlock(&dentry->d_lock);
                 break;
@@ -1653,7 +1707,7 @@ void d_delete(struct dentry * dentry)
         spin_lock(&dcache_lock);
         spin_lock(&dentry->d_lock);
         isdir = S_ISDIR(dentry->d_inode->i_mode);
-        if (atomic_read(&dentry->d_count) == 1) {
+        if (dentry->d_count == 1) {
                 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
                 dentry_iput(dentry);
                 fsnotify_nameremove(dentry, isdir);
@@ -2494,11 +2548,15 @@ resume:
                         this_parent = dentry;
                         goto repeat;
                 }
-                atomic_dec(&dentry->d_count);
+                spin_lock(&dentry->d_lock);
+                dentry->d_count--;
+                spin_unlock(&dentry->d_lock);
         }
         if (this_parent != root) {
                 next = this_parent->d_u.d_child.next;
-                atomic_dec(&this_parent->d_count);
+                spin_lock(&this_parent->d_lock);
+                this_parent->d_count--;
+                spin_unlock(&this_parent->d_lock);
                 this_parent = this_parent->d_parent;
                 goto resume;
         }
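A side note on the new dget_parent() and the trylock in dput() above: when a second per-dentry lock has to be taken while one is already held, and no global ordering covers the pair, the patch backs off and retries rather than blocking. A rough user-space sketch of that idiom follows; the names struct node and grab_parent() are invented, pthread primitives stand in for spinlocks, and sched_yield() stands in for cpu_relax():

/*
 * Illustrative sketch only, not kernel code: acquire a second lock with a
 * trylock and restart from scratch on failure to avoid an ABBA deadlock.
 */
#include <pthread.h>
#include <sched.h>
#include <stddef.h>

struct node {
        pthread_mutex_t lock;
        int count;
        struct node *parent;
};

/* Pin a node's parent, mirroring the shape of dget_parent() above. */
static struct node *grab_parent(struct node *n)
{
        struct node *p;

retry:
        pthread_mutex_lock(&n->lock);
        p = n->parent;
        if (p && p != n && pthread_mutex_trylock(&p->lock) != 0) {
                /* No safe lock order: drop what we hold and start again. */
                pthread_mutex_unlock(&n->lock);
                sched_yield();
                goto retry;
        }
        if (p) {
                p->count++;     /* reference taken under p's lock (n's lock if p == n) */
                if (p != n)
                        pthread_mutex_unlock(&p->lock);
        }
        pthread_mutex_unlock(&n->lock);
        return p;
}

int main(void)
{
        struct node parent = { PTHREAD_MUTEX_INITIALIZER, 1, NULL };
        struct node child  = { PTHREAD_MUTEX_INITIALIZER, 1, &parent };

        parent.parent = &parent;        /* a root points to itself, like an IS_ROOT() dentry */
        return grab_parent(&child) == &parent ? 0 : 1;
}

The retry path trades a small amount of wasted work under contention for never having to define a parent/child lock order, which is what the in-kernel comment about a "livelock possibility" in dput() refers to.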