diff options
| -rw-r--r-- | fs/dcache.c | 57 | ||||
| -rw-r--r-- | fs/namei.c | 16 | ||||
| -rw-r--r-- | include/linux/dcache.h | 19 | ||||
| -rw-r--r-- | include/linux/lockref.h | 71 |
4 files changed, 113 insertions, 50 deletions
diff --git a/fs/dcache.c b/fs/dcache.c index 83cfb834db03..b949af850cd6 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -229,7 +229,7 @@ static void __d_free(struct rcu_head *head) | |||
| 229 | */ | 229 | */ |
| 230 | static void d_free(struct dentry *dentry) | 230 | static void d_free(struct dentry *dentry) |
| 231 | { | 231 | { |
| 232 | BUG_ON(dentry->d_count); | 232 | BUG_ON(dentry->d_lockref.count); |
| 233 | this_cpu_dec(nr_dentry); | 233 | this_cpu_dec(nr_dentry); |
| 234 | if (dentry->d_op && dentry->d_op->d_release) | 234 | if (dentry->d_op && dentry->d_op->d_release) |
| 235 | dentry->d_op->d_release(dentry); | 235 | dentry->d_op->d_release(dentry); |
| @@ -467,7 +467,7 @@ relock: | |||
| 467 | } | 467 | } |
| 468 | 468 | ||
| 469 | if (ref) | 469 | if (ref) |
| 470 | dentry->d_count--; | 470 | dentry->d_lockref.count--; |
| 471 | /* | 471 | /* |
| 472 | * inform the fs via d_prune that this dentry is about to be | 472 | * inform the fs via d_prune that this dentry is about to be |
| 473 | * unhashed and destroyed. | 473 | * unhashed and destroyed. |
| @@ -513,15 +513,10 @@ void dput(struct dentry *dentry) | |||
| 513 | return; | 513 | return; |
| 514 | 514 | ||
| 515 | repeat: | 515 | repeat: |
| 516 | if (dentry->d_count == 1) | 516 | if (dentry->d_lockref.count == 1) |
| 517 | might_sleep(); | 517 | might_sleep(); |
| 518 | spin_lock(&dentry->d_lock); | 518 | if (lockref_put_or_lock(&dentry->d_lockref)) |
| 519 | BUG_ON(!dentry->d_count); | ||
| 520 | if (dentry->d_count > 1) { | ||
| 521 | dentry->d_count--; | ||
| 522 | spin_unlock(&dentry->d_lock); | ||
| 523 | return; | 519 | return; |
| 524 | } | ||
| 525 | 520 | ||
| 526 | if (dentry->d_flags & DCACHE_OP_DELETE) { | 521 | if (dentry->d_flags & DCACHE_OP_DELETE) { |
| 527 | if (dentry->d_op->d_delete(dentry)) | 522 | if (dentry->d_op->d_delete(dentry)) |
| @@ -535,7 +530,7 @@ repeat: | |||
| 535 | dentry->d_flags |= DCACHE_REFERENCED; | 530 | dentry->d_flags |= DCACHE_REFERENCED; |
| 536 | dentry_lru_add(dentry); | 531 | dentry_lru_add(dentry); |
| 537 | 532 | ||
| 538 | dentry->d_count--; | 533 | dentry->d_lockref.count--; |
| 539 | spin_unlock(&dentry->d_lock); | 534 | spin_unlock(&dentry->d_lock); |
| 540 | return; | 535 | return; |
| 541 | 536 | ||
| @@ -590,7 +585,7 @@ int d_invalidate(struct dentry * dentry) | |||
| 590 | * We also need to leave mountpoints alone, | 585 | * We also need to leave mountpoints alone, |
| 591 | * directory or not. | 586 | * directory or not. |
| 592 | */ | 587 | */ |
| 593 | if (dentry->d_count > 1 && dentry->d_inode) { | 588 | if (dentry->d_lockref.count > 1 && dentry->d_inode) { |
| 594 | if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { | 589 | if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { |
| 595 | spin_unlock(&dentry->d_lock); | 590 | spin_unlock(&dentry->d_lock); |
| 596 | return -EBUSY; | 591 | return -EBUSY; |
| @@ -606,14 +601,12 @@ EXPORT_SYMBOL(d_invalidate); | |||
| 606 | /* This must be called with d_lock held */ | 601 | /* This must be called with d_lock held */ |
| 607 | static inline void __dget_dlock(struct dentry *dentry) | 602 | static inline void __dget_dlock(struct dentry *dentry) |
| 608 | { | 603 | { |
| 609 | dentry->d_count++; | 604 | dentry->d_lockref.count++; |
| 610 | } | 605 | } |
| 611 | 606 | ||
| 612 | static inline void __dget(struct dentry *dentry) | 607 | static inline void __dget(struct dentry *dentry) |
| 613 | { | 608 | { |
| 614 | spin_lock(&dentry->d_lock); | 609 | lockref_get(&dentry->d_lockref); |
| 615 | __dget_dlock(dentry); | ||
| 616 | spin_unlock(&dentry->d_lock); | ||
| 617 | } | 610 | } |
| 618 | 611 | ||
| 619 | struct dentry *dget_parent(struct dentry *dentry) | 612 | struct dentry *dget_parent(struct dentry *dentry) |
| @@ -634,8 +627,8 @@ repeat: | |||
| 634 | goto repeat; | 627 | goto repeat; |
| 635 | } | 628 | } |
| 636 | rcu_read_unlock(); | 629 | rcu_read_unlock(); |
| 637 | BUG_ON(!ret->d_count); | 630 | BUG_ON(!ret->d_lockref.count); |
| 638 | ret->d_count++; | 631 | ret->d_lockref.count++; |
| 639 | spin_unlock(&ret->d_lock); | 632 | spin_unlock(&ret->d_lock); |
| 640 | return ret; | 633 | return ret; |
| 641 | } | 634 | } |
| @@ -718,7 +711,7 @@ restart: | |||
| 718 | spin_lock(&inode->i_lock); | 711 | spin_lock(&inode->i_lock); |
| 719 | hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { | 712 | hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { |
| 720 | spin_lock(&dentry->d_lock); | 713 | spin_lock(&dentry->d_lock); |
| 721 | if (!dentry->d_count) { | 714 | if (!dentry->d_lockref.count) { |
| 722 | __dget_dlock(dentry); | 715 | __dget_dlock(dentry); |
| 723 | __d_drop(dentry); | 716 | __d_drop(dentry); |
| 724 | spin_unlock(&dentry->d_lock); | 717 | spin_unlock(&dentry->d_lock); |
| @@ -763,12 +756,8 @@ static void try_prune_one_dentry(struct dentry *dentry) | |||
| 763 | /* Prune ancestors. */ | 756 | /* Prune ancestors. */ |
| 764 | dentry = parent; | 757 | dentry = parent; |
| 765 | while (dentry) { | 758 | while (dentry) { |
| 766 | spin_lock(&dentry->d_lock); | 759 | if (lockref_put_or_lock(&dentry->d_lockref)) |
| 767 | if (dentry->d_count > 1) { | ||
| 768 | dentry->d_count--; | ||
| 769 | spin_unlock(&dentry->d_lock); | ||
| 770 | return; | 760 | return; |
| 771 | } | ||
| 772 | dentry = dentry_kill(dentry, 1); | 761 | dentry = dentry_kill(dentry, 1); |
| 773 | } | 762 | } |
| 774 | } | 763 | } |
| @@ -793,7 +782,7 @@ static void shrink_dentry_list(struct list_head *list) | |||
| 793 | * the LRU because of laziness during lookup. Do not free | 782 | * the LRU because of laziness during lookup. Do not free |
| 794 | * it - just keep it off the LRU list. | 783 | * it - just keep it off the LRU list. |
| 795 | */ | 784 | */ |
| 796 | if (dentry->d_count) { | 785 | if (dentry->d_lockref.count) { |
| 797 | dentry_lru_del(dentry); | 786 | dentry_lru_del(dentry); |
| 798 | spin_unlock(&dentry->d_lock); | 787 | spin_unlock(&dentry->d_lock); |
| 799 | continue; | 788 | continue; |
| @@ -913,7 +902,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) | |||
| 913 | dentry_lru_del(dentry); | 902 | dentry_lru_del(dentry); |
| 914 | __d_shrink(dentry); | 903 | __d_shrink(dentry); |
| 915 | 904 | ||
| 916 | if (dentry->d_count != 0) { | 905 | if (dentry->d_lockref.count != 0) { |
| 917 | printk(KERN_ERR | 906 | printk(KERN_ERR |
| 918 | "BUG: Dentry %p{i=%lx,n=%s}" | 907 | "BUG: Dentry %p{i=%lx,n=%s}" |
| 919 | " still in use (%d)" | 908 | " still in use (%d)" |
| @@ -922,7 +911,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) | |||
| 922 | dentry->d_inode ? | 911 | dentry->d_inode ? |
| 923 | dentry->d_inode->i_ino : 0UL, | 912 | dentry->d_inode->i_ino : 0UL, |
| 924 | dentry->d_name.name, | 913 | dentry->d_name.name, |
| 925 | dentry->d_count, | 914 | dentry->d_lockref.count, |
| 926 | dentry->d_sb->s_type->name, | 915 | dentry->d_sb->s_type->name, |
| 927 | dentry->d_sb->s_id); | 916 | dentry->d_sb->s_id); |
| 928 | BUG(); | 917 | BUG(); |
| @@ -933,7 +922,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) | |||
| 933 | list_del(&dentry->d_u.d_child); | 922 | list_del(&dentry->d_u.d_child); |
| 934 | } else { | 923 | } else { |
| 935 | parent = dentry->d_parent; | 924 | parent = dentry->d_parent; |
| 936 | parent->d_count--; | 925 | parent->d_lockref.count--; |
| 937 | list_del(&dentry->d_u.d_child); | 926 | list_del(&dentry->d_u.d_child); |
| 938 | } | 927 | } |
| 939 | 928 | ||
| @@ -981,7 +970,7 @@ void shrink_dcache_for_umount(struct super_block *sb) | |||
| 981 | 970 | ||
| 982 | dentry = sb->s_root; | 971 | dentry = sb->s_root; |
| 983 | sb->s_root = NULL; | 972 | sb->s_root = NULL; |
| 984 | dentry->d_count--; | 973 | dentry->d_lockref.count--; |
| 985 | shrink_dcache_for_umount_subtree(dentry); | 974 | shrink_dcache_for_umount_subtree(dentry); |
| 986 | 975 | ||
| 987 | while (!hlist_bl_empty(&sb->s_anon)) { | 976 | while (!hlist_bl_empty(&sb->s_anon)) { |
| @@ -1147,7 +1136,7 @@ resume: | |||
| 1147 | * loop in shrink_dcache_parent() might not make any progress | 1136 | * loop in shrink_dcache_parent() might not make any progress |
| 1148 | * and loop forever. | 1137 | * and loop forever. |
| 1149 | */ | 1138 | */ |
| 1150 | if (dentry->d_count) { | 1139 | if (dentry->d_lockref.count) { |
| 1151 | dentry_lru_del(dentry); | 1140 | dentry_lru_del(dentry); |
| 1152 | } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { | 1141 | } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { |
| 1153 | dentry_lru_move_list(dentry, dispose); | 1142 | dentry_lru_move_list(dentry, dispose); |
| @@ -1269,7 +1258,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) | |||
| 1269 | smp_wmb(); | 1258 | smp_wmb(); |
| 1270 | dentry->d_name.name = dname; | 1259 | dentry->d_name.name = dname; |
| 1271 | 1260 | ||
| 1272 | dentry->d_count = 1; | 1261 | dentry->d_lockref.count = 1; |
| 1273 | dentry->d_flags = 0; | 1262 | dentry->d_flags = 0; |
| 1274 | spin_lock_init(&dentry->d_lock); | 1263 | spin_lock_init(&dentry->d_lock); |
| 1275 | seqcount_init(&dentry->d_seq); | 1264 | seqcount_init(&dentry->d_seq); |
| @@ -1970,7 +1959,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) | |||
| 1970 | goto next; | 1959 | goto next; |
| 1971 | } | 1960 | } |
| 1972 | 1961 | ||
| 1973 | dentry->d_count++; | 1962 | dentry->d_lockref.count++; |
| 1974 | found = dentry; | 1963 | found = dentry; |
| 1975 | spin_unlock(&dentry->d_lock); | 1964 | spin_unlock(&dentry->d_lock); |
| 1976 | break; | 1965 | break; |
| @@ -2069,7 +2058,7 @@ again: | |||
| 2069 | spin_lock(&dentry->d_lock); | 2058 | spin_lock(&dentry->d_lock); |
| 2070 | inode = dentry->d_inode; | 2059 | inode = dentry->d_inode; |
| 2071 | isdir = S_ISDIR(inode->i_mode); | 2060 | isdir = S_ISDIR(inode->i_mode); |
| 2072 | if (dentry->d_count == 1) { | 2061 | if (dentry->d_lockref.count == 1) { |
| 2073 | if (!spin_trylock(&inode->i_lock)) { | 2062 | if (!spin_trylock(&inode->i_lock)) { |
| 2074 | spin_unlock(&dentry->d_lock); | 2063 | spin_unlock(&dentry->d_lock); |
| 2075 | cpu_relax(); | 2064 | cpu_relax(); |
| @@ -2948,7 +2937,7 @@ resume: | |||
| 2948 | } | 2937 | } |
| 2949 | if (!(dentry->d_flags & DCACHE_GENOCIDE)) { | 2938 | if (!(dentry->d_flags & DCACHE_GENOCIDE)) { |
| 2950 | dentry->d_flags |= DCACHE_GENOCIDE; | 2939 | dentry->d_flags |= DCACHE_GENOCIDE; |
| 2951 | dentry->d_count--; | 2940 | dentry->d_lockref.count--; |
| 2952 | } | 2941 | } |
| 2953 | spin_unlock(&dentry->d_lock); | 2942 | spin_unlock(&dentry->d_lock); |
| 2954 | } | 2943 | } |
| @@ -2956,7 +2945,7 @@ resume: | |||
| 2956 | struct dentry *child = this_parent; | 2945 | struct dentry *child = this_parent; |
| 2957 | if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { | 2946 | if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { |
| 2958 | this_parent->d_flags |= DCACHE_GENOCIDE; | 2947 | this_parent->d_flags |= DCACHE_GENOCIDE; |
| 2959 | this_parent->d_count--; | 2948 | this_parent->d_lockref.count--; |
| 2960 | } | 2949 | } |
| 2961 | this_parent = try_to_ascend(this_parent, locked, seq); | 2950 | this_parent = try_to_ascend(this_parent, locked, seq); |
| 2962 | if (!this_parent) | 2951 | if (!this_parent) |
diff --git a/fs/namei.c b/fs/namei.c index 89a612e392eb..7720fbd5277b 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -536,8 +536,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) | |||
| 536 | * a reference at this point. | 536 | * a reference at this point. |
| 537 | */ | 537 | */ |
| 538 | BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); | 538 | BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); |
| 539 | BUG_ON(!parent->d_count); | 539 | BUG_ON(!parent->d_lockref.count); |
| 540 | parent->d_count++; | 540 | parent->d_lockref.count++; |
| 541 | spin_unlock(&dentry->d_lock); | 541 | spin_unlock(&dentry->d_lock); |
| 542 | } | 542 | } |
| 543 | spin_unlock(&parent->d_lock); | 543 | spin_unlock(&parent->d_lock); |
| @@ -3327,7 +3327,7 @@ void dentry_unhash(struct dentry *dentry) | |||
| 3327 | { | 3327 | { |
| 3328 | shrink_dcache_parent(dentry); | 3328 | shrink_dcache_parent(dentry); |
| 3329 | spin_lock(&dentry->d_lock); | 3329 | spin_lock(&dentry->d_lock); |
| 3330 | if (dentry->d_count == 1) | 3330 | if (dentry->d_lockref.count == 1) |
| 3331 | __d_drop(dentry); | 3331 | __d_drop(dentry); |
| 3332 | spin_unlock(&dentry->d_lock); | 3332 | spin_unlock(&dentry->d_lock); |
| 3333 | } | 3333 | } |
| @@ -3671,11 +3671,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, | |||
| 3671 | if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) | 3671 | if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) |
| 3672 | return -EINVAL; | 3672 | return -EINVAL; |
| 3673 | /* | 3673 | /* |
| 3674 | * Using empty names is equivalent to using AT_SYMLINK_FOLLOW | 3674 | * To use null names we require CAP_DAC_READ_SEARCH |
| 3675 | * on /proc/self/fd/<fd>. | 3675 | * This ensures that not everyone will be able to create |
| 3676 | * hard links using the passed file descriptor. | ||
| 3676 | */ | 3677 | */ |
| 3677 | if (flags & AT_EMPTY_PATH) | 3678 | if (flags & AT_EMPTY_PATH) { |
| 3679 | if (!capable(CAP_DAC_READ_SEARCH)) | ||
| 3680 | return -ENOENT; | ||
| 3678 | how = LOOKUP_EMPTY; | 3681 | how = LOOKUP_EMPTY; |
| 3682 | } | ||
| 3679 | 3683 | ||
| 3680 | if (flags & AT_SYMLINK_FOLLOW) | 3684 | if (flags & AT_SYMLINK_FOLLOW) |
| 3681 | how |= LOOKUP_FOLLOW; | 3685 | how |= LOOKUP_FOLLOW; |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 4a12532da8c4..efdc94434c30 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/seqlock.h> | 9 | #include <linux/seqlock.h> |
| 10 | #include <linux/cache.h> | 10 | #include <linux/cache.h> |
| 11 | #include <linux/rcupdate.h> | 11 | #include <linux/rcupdate.h> |
| 12 | #include <linux/lockref.h> | ||
| 12 | 13 | ||
| 13 | struct nameidata; | 14 | struct nameidata; |
| 14 | struct path; | 15 | struct path; |
| @@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int); | |||
| 100 | # endif | 101 | # endif |
| 101 | #endif | 102 | #endif |
| 102 | 103 | ||
| 104 | #define d_lock d_lockref.lock | ||
| 105 | |||
| 103 | struct dentry { | 106 | struct dentry { |
| 104 | /* RCU lookup touched fields */ | 107 | /* RCU lookup touched fields */ |
| 105 | unsigned int d_flags; /* protected by d_lock */ | 108 | unsigned int d_flags; /* protected by d_lock */ |
| @@ -112,8 +115,7 @@ struct dentry { | |||
| 112 | unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ | 115 | unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ |
| 113 | 116 | ||
| 114 | /* Ref lookup also touches following */ | 117 | /* Ref lookup also touches following */ |
| 115 | unsigned int d_count; /* protected by d_lock */ | 118 | struct lockref d_lockref; /* per-dentry lock and refcount */ |
| 116 | spinlock_t d_lock; /* per dentry lock */ | ||
| 117 | const struct dentry_operations *d_op; | 119 | const struct dentry_operations *d_op; |
| 118 | struct super_block *d_sb; /* The root of the dentry tree */ | 120 | struct super_block *d_sb; /* The root of the dentry tree */ |
| 119 | unsigned long d_time; /* used by d_revalidate */ | 121 | unsigned long d_time; /* used by d_revalidate */ |
| @@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) | |||
| 318 | assert_spin_locked(&dentry->d_lock); | 320 | assert_spin_locked(&dentry->d_lock); |
| 319 | if (!read_seqcount_retry(&dentry->d_seq, seq)) { | 321 | if (!read_seqcount_retry(&dentry->d_seq, seq)) { |
| 320 | ret = 1; | 322 | ret = 1; |
| 321 | dentry->d_count++; | 323 | dentry->d_lockref.count++; |
| 322 | } | 324 | } |
| 323 | 325 | ||
| 324 | return ret; | 326 | return ret; |
| @@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) | |||
| 326 | 328 | ||
| 327 | static inline unsigned d_count(const struct dentry *dentry) | 329 | static inline unsigned d_count(const struct dentry *dentry) |
| 328 | { | 330 | { |
| 329 | return dentry->d_count; | 331 | return dentry->d_lockref.count; |
| 330 | } | 332 | } |
| 331 | 333 | ||
| 332 | /* validate "insecure" dentry pointer */ | 334 | /* validate "insecure" dentry pointer */ |
| @@ -357,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int); | |||
| 357 | static inline struct dentry *dget_dlock(struct dentry *dentry) | 359 | static inline struct dentry *dget_dlock(struct dentry *dentry) |
| 358 | { | 360 | { |
| 359 | if (dentry) | 361 | if (dentry) |
| 360 | dentry->d_count++; | 362 | dentry->d_lockref.count++; |
| 361 | return dentry; | 363 | return dentry; |
| 362 | } | 364 | } |
| 363 | 365 | ||
| 364 | static inline struct dentry *dget(struct dentry *dentry) | 366 | static inline struct dentry *dget(struct dentry *dentry) |
| 365 | { | 367 | { |
| 366 | if (dentry) { | 368 | if (dentry) |
| 367 | spin_lock(&dentry->d_lock); | 369 | lockref_get(&dentry->d_lockref); |
| 368 | dget_dlock(dentry); | ||
| 369 | spin_unlock(&dentry->d_lock); | ||
| 370 | } | ||
| 371 | return dentry; | 370 | return dentry; |
| 372 | } | 371 | } |
| 373 | 372 | ||
diff --git a/include/linux/lockref.h b/include/linux/lockref.h new file mode 100644 index 000000000000..01233e01627a --- /dev/null +++ b/include/linux/lockref.h | |||
| @@ -0,0 +1,71 @@ | |||
| 1 | #ifndef __LINUX_LOCKREF_H | ||
| 2 | #define __LINUX_LOCKREF_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Locked reference counts. | ||
| 6 | * | ||
| 7 | * These are different from just plain atomic refcounts in that they | ||
| 8 | * are atomic with respect to the spinlock that goes with them. In | ||
| 9 | * particular, there can be implementations that don't actually get | ||
| 10 | * the spinlock for the common decrement/increment operations, but they | ||
| 11 | * still have to check that the operation is done semantically as if | ||
| 12 | * the spinlock had been taken (using a cmpxchg operation that covers | ||
| 13 | * both the lock and the count word, or using memory transactions, for | ||
| 14 | * example). | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/spinlock.h> | ||
| 18 | |||
| 19 | struct lockref { | ||
| 20 | spinlock_t lock; | ||
| 21 | unsigned int count; | ||
| 22 | }; | ||
| 23 | |||
| 24 | /** | ||
| 25 | * lockref_get - Increments reference count unconditionally | ||
| 26 | * @lockref: pointer to lockref structure | ||
| 27 | * | ||
| 28 | * This operation is only valid if you already hold a reference | ||
| 29 | * to the object, so you know the count cannot be zero. | ||
| 30 | */ | ||
| 31 | static inline void lockref_get(struct lockref *lockref) | ||
| 32 | { | ||
| 33 | spin_lock(&lockref->lock); | ||
| 34 | lockref->count++; | ||
| 35 | spin_unlock(&lockref->lock); | ||
| 36 | } | ||
| 37 | |||
| 38 | /** | ||
| 39 | * lockref_get_not_zero - Increments count unless the count is 0 | ||
| 40 | * @lockref: pointer to lockref structure | ||
| 41 | * Return: 1 if count updated successfully or 0 if count is 0 | ||
| 42 | */ | ||
| 43 | static inline int lockref_get_not_zero(struct lockref *lockref) | ||
| 44 | { | ||
| 45 | int retval = 0; | ||
| 46 | |||
| 47 | spin_lock(&lockref->lock); | ||
| 48 | if (lockref->count) { | ||
| 49 | lockref->count++; | ||
| 50 | retval = 1; | ||
| 51 | } | ||
| 52 | spin_unlock(&lockref->lock); | ||
| 53 | return retval; | ||
| 54 | } | ||
| 55 | |||
| 56 | /** | ||
| 57 | * lockref_put_or_lock - decrements count unless count <= 1 before decrement | ||
| 58 | * @lockref: pointer to lockref structure | ||
| 59 | * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken | ||
| 60 | */ | ||
| 61 | static inline int lockref_put_or_lock(struct lockref *lockref) | ||
| 62 | { | ||
| 63 | spin_lock(&lockref->lock); | ||
| 64 | if (lockref->count <= 1) | ||
| 65 | return 0; | ||
| 66 | lockref->count--; | ||
| 67 | spin_unlock(&lockref->lock); | ||
| 68 | return 1; | ||
| 69 | } | ||
| 70 | |||
| 71 | #endif /* __LINUX_LOCKREF_H */ | ||
