diff options
| author | Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 2015-02-23 14:02:19 -0500 |
|---|---|---|
| committer | Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 2015-02-23 14:02:19 -0500 |
| commit | 99a85b901eb54f62ff0c3fd6eb56e60b7b9f15c8 (patch) | |
| tree | 0c6637b7d2172e079c30e966847326767cbaf45c /include/linux/percpu-refcount.h | |
| parent | 135f9be9194cf7778eb73594aa55791b229cf27c (diff) | |
| parent | c517d838eb7d07bbe9507871fab3931deccff539 (diff) | |
Merge tag 'v4.0-rc1' into patchwork
Linux 4.0-rc1
* tag 'v4.0-rc1': (8947 commits)
Linux 4.0-rc1
autofs4 copy_dev_ioctl(): keep the value of ->size we'd used for allocation
procfs: fix race between symlink removals and traversals
debugfs: leave freeing a symlink body until inode eviction
Documentation/filesystems/Locking: ->get_sb() is long gone
trylock_super(): replacement for grab_super_passive()
fanotify: Fix up scripted S_ISDIR/S_ISREG/S_ISLNK conversions
Cachefiles: Fix up scripted S_ISDIR/S_ISREG/S_ISLNK conversions
VFS: (Scripted) Convert S_ISLNK/DIR/REG(dentry->d_inode) to d_is_*(dentry)
SELinux: Use d_is_positive() rather than testing dentry->d_inode
Smack: Use d_is_positive() rather than testing dentry->d_inode
TOMOYO: Use d_is_dir() rather than d_inode and S_ISDIR()
Apparmor: Use d_is_positive/negative() rather than testing dentry->d_inode
Apparmor: mediated_filesystem() should use dentry->d_sb not inode->i_sb
VFS: Split DCACHE_FILE_TYPE into regular and special types
VFS: Add a fallthrough flag for marking virtual dentries
VFS: Add a whiteout dentry type
VFS: Introduce inode-getting helpers for layered/unioned fs environments
kernel: make READ_ONCE() valid on const arguments
blk-throttle: check stats_cpu before reading it from sysfs
...
Diffstat (limited to 'include/linux/percpu-refcount.h')
| -rw-r--r-- | include/linux/percpu-refcount.h | 34 |
1 file changed, 31 insertions, 3 deletions
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index b4337646388b..12c9b485beb7 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
| @@ -128,8 +128,22 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) | |||
| 128 | static inline bool __ref_is_percpu(struct percpu_ref *ref, | 128 | static inline bool __ref_is_percpu(struct percpu_ref *ref, |
| 129 | unsigned long __percpu **percpu_countp) | 129 | unsigned long __percpu **percpu_countp) |
| 130 | { | 130 | { |
| 131 | /* paired with smp_store_release() in percpu_ref_reinit() */ | 131 | unsigned long percpu_ptr; |
| 132 | unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr); | 132 | |
| 133 | /* | ||
| 134 | * The value of @ref->percpu_count_ptr is tested for | ||
| 135 | * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then | ||
| 136 | * used as a pointer. If the compiler generates a separate fetch | ||
| 137 | * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in | ||
| 138 | * between contaminating the pointer value, meaning that | ||
| 139 | * ACCESS_ONCE() is required when fetching it. | ||
| 140 | * | ||
| 141 | * Also, we need a data dependency barrier to be paired with | ||
| 142 | * smp_store_release() in __percpu_ref_switch_to_percpu(). | ||
| 143 | * | ||
| 144 | * Use lockless deref which contains both. | ||
| 145 | */ | ||
| 146 | percpu_ptr = lockless_dereference(ref->percpu_count_ptr); | ||
| 133 | 147 | ||
| 134 | /* | 148 | /* |
| 135 | * Theoretically, the following could test just ATOMIC; however, | 149 | * Theoretically, the following could test just ATOMIC; however, |
| @@ -233,7 +247,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) | |||
| 233 | if (__ref_is_percpu(ref, &percpu_count)) { | 247 | if (__ref_is_percpu(ref, &percpu_count)) { |
| 234 | this_cpu_inc(*percpu_count); | 248 | this_cpu_inc(*percpu_count); |
| 235 | ret = true; | 249 | ret = true; |
| 236 | } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) { | 250 | } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) { |
| 237 | ret = atomic_long_inc_not_zero(&ref->count); | 251 | ret = atomic_long_inc_not_zero(&ref->count); |
| 238 | } | 252 | } |
| 239 | 253 | ||
| @@ -281,6 +295,20 @@ static inline void percpu_ref_put(struct percpu_ref *ref) | |||
| 281 | } | 295 | } |
| 282 | 296 | ||
| 283 | /** | 297 | /** |
| 298 | * percpu_ref_is_dying - test whether a percpu refcount is dying or dead | ||
| 299 | * @ref: percpu_ref to test | ||
| 300 | * | ||
| 301 | * Returns %true if @ref is dying or dead. | ||
| 302 | * | ||
| 303 | * This function is safe to call as long as @ref is between init and exit | ||
| 304 | * and the caller is responsible for synchronizing against state changes. | ||
| 305 | */ | ||
| 306 | static inline bool percpu_ref_is_dying(struct percpu_ref *ref) | ||
| 307 | { | ||
| 308 | return ref->percpu_count_ptr & __PERCPU_REF_DEAD; | ||
| 309 | } | ||
| 310 | |||
| 311 | /** | ||
| 284 | * percpu_ref_is_zero - test whether a percpu refcount reached zero | 312 | * percpu_ref_is_zero - test whether a percpu refcount reached zero |
| 285 | * @ref: percpu_ref to test | 313 | * @ref: percpu_ref to test |
| 286 | * | 314 | * |
