 fs/namei.c                | 17 ++++++-----------
 include/linux/posix_acl.h | 18 +++++++++---------
 2 files changed, 15 insertions(+), 20 deletions(-)
diff --git a/fs/namei.c b/fs/namei.c
index 445fd5da11fa..3d607bd80e09 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -179,19 +179,14 @@ static int check_acl(struct inode *inode, int mask)
 #ifdef CONFIG_FS_POSIX_ACL
         struct posix_acl *acl;
 
-        /*
-         * Under RCU walk, we cannot even do a "get_cached_acl()",
-         * because that involves locking and getting a refcount on
-         * a cached ACL.
-         *
-         * So the only case we handle during RCU walking is the
-         * case of a cached "no ACL at all", which needs no locks
-         * or refcounts.
-         */
         if (mask & MAY_NOT_BLOCK) {
-                if (negative_cached_acl(inode, ACL_TYPE_ACCESS))
+                acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
+                if (!acl)
                         return -EAGAIN;
-                return -ECHILD;
+                /* no ->get_acl() calls in RCU mode... */
+                if (acl == ACL_NOT_CACHED)
+                        return -ECHILD;
+                return posix_acl_permission(inode, acl, mask);
         }
 
         acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
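
For context, this is how the RCU-walk branch of check_acl() reads once the hunk above is applied. It is a sketch assembled from the diff itself; the rest of the function is elided, and the trailing comments on the return values are added here as explanation, not part of the patch:

        if (mask & MAY_NOT_BLOCK) {
                acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
                if (!acl)                       /* cached "no ACL": fall back to normal mode bits */
                        return -EAGAIN;
                /* no ->get_acl() calls in RCU mode... */
                if (acl == ACL_NOT_CACHED)      /* nothing cached: drop out of RCU-walk and retry */
                        return -ECHILD;
                return posix_acl_permission(inode, acl, mask);
        }

The point of the change is that a cached ACL can now be consulted without taking a reference: get_cached_acl_rcu() only dereferences the pointer, so the old restriction to the negative-cache case goes away and only the genuinely uncached case still has to bail out of RCU-walk.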
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index a9c2fb29be96..b7681102a4b9 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -9,6 +9,7 @@
 #define __LINUX_POSIX_ACL_H
 
 #include <linux/slab.h>
+#include <linux/rcupdate.h>
 
 #define ACL_UNDEFINED_ID        (-1)
 
@@ -38,7 +39,10 @@ struct posix_acl_entry {
 };
 
 struct posix_acl {
-        atomic_t                a_refcount;
+        union {
+                atomic_t                a_refcount;
+                struct rcu_head         a_rcu;
+        };
         unsigned int            a_count;
         struct posix_acl_entry  a_entries[0];
 };
@@ -65,7 +69,7 @@ static inline void
 posix_acl_release(struct posix_acl *acl)
 {
         if (acl && atomic_dec_and_test(&acl->a_refcount))
-                kfree(acl);
+                kfree_rcu(acl, a_rcu);
 }
 
 
@@ -110,13 +114,9 @@ static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
         return acl;
 }
 
-static inline int negative_cached_acl(struct inode *inode, int type)
+static inline struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type)
 {
-        struct posix_acl **p = acl_by_type(inode, type);
-        struct posix_acl *acl = ACCESS_ONCE(*p);
-        if (acl)
-                return 0;
-        return 1;
+        return rcu_dereference(*acl_by_type(inode, type));
 }
 
 static inline void set_cached_acl(struct inode *inode,
@@ -127,7 +127,7 @@ static inline void set_cached_acl(struct inode *inode,
         struct posix_acl *old;
         spin_lock(&inode->i_lock);
         old = *p;
-        *p = posix_dup_acl(acl);
+        rcu_assign_pointer(*p, posix_acl_dup(acl));
         spin_unlock(&inode->i_lock);
         if (old != ACL_NOT_CACHED)
                 posix_acl_release(old);
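
Taken together, the header changes give the ACL cache a full RCU life cycle: readers load the pointer with rcu_dereference(), the writer publishes it with rcu_assign_pointer() under i_lock, and the final put defers the kfree() past a grace period. A sketch of the resulting helpers, reconstructed from the hunks above (the comments are added here as explanation and are not in the patch):

struct posix_acl {
        union {
                atomic_t        a_refcount;     /* used while the ACL is reachable */
                struct rcu_head a_rcu;          /* reused by kfree_rcu() after the last put */
        };
        unsigned int            a_count;
        struct posix_acl_entry  a_entries[0];
};

static inline struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type)
{
        /* lockless, refcount-free read: NULL, ACL_NOT_CACHED, or a live ACL */
        return rcu_dereference(*acl_by_type(inode, type));
}

static inline void posix_acl_release(struct posix_acl *acl)
{
        if (acl && atomic_dec_and_test(&acl->a_refcount))
                kfree_rcu(acl, a_rcu);          /* freed only after concurrent RCU readers finish */
}

Overlaying a_rcu on a_refcount in a union is safe because the rcu_head is only needed once the refcount has already dropped to zero, and the rcu_assign_pointer()/rcu_dereference() pairing provides the ordering an RCU-walk reader needs to never observe a partially published ACL.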