-rw-r--r--  fs/btrfs/inode.c            6
-rw-r--r--  fs/jffs2/acl.c              3
-rw-r--r--  fs/jfs/acl.c               13
-rw-r--r--  include/linux/posix_acl.h  10
-rw-r--r--  kernel/futex.c             45
-rw-r--r--  mm/page_alloc.c             4
-rw-r--r--  mm/shmem.c                  5
-rw-r--r--  mm/slub.c                  10
8 files changed, 55 insertions, 41 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 78ad38ddd01f..dbe1aabf96cd 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2122,10 +2122,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
 	 * any xattrs or acls
 	 */
 	maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
-	if (!maybe_acls) {
-		inode->i_acl = NULL;
-		inode->i_default_acl = NULL;
-	}
+	if (!maybe_acls)
+		cache_no_acl(inode);
 
 	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
 						alloc_group_block, 0);
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index edd2ad6416d8..8fcb6239218e 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -284,8 +284,7 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
 	struct posix_acl *acl, *clone;
 	int rc;
 
-	inode->i_default_acl = NULL;
-	inode->i_acl = NULL;
+	cache_no_acl(inode);
 
 	if (S_ISLNK(*i_mode))
 		return 0;	/* Symlink always has no-ACL */
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index f272bf032e1e..91fa3ad6e8c2 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -118,15 +118,16 @@ out:
 
 static int jfs_check_acl(struct inode *inode, int mask)
 {
-	if (inode->i_acl == ACL_NOT_CACHED) {
-		struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
-		if (IS_ERR(acl))
-			return PTR_ERR(acl);
+	struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
+
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+	if (acl) {
+		int error = posix_acl_permission(inode, acl, mask);
 		posix_acl_release(acl);
+		return error;
 	}
 
-	if (inode->i_acl)
-		return posix_acl_permission(inode, inode->i_acl, mask);
 	return -EAGAIN;
 }
 
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 0cdba01b7756..065a3652a3ea 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -83,6 +83,7 @@ extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
 extern struct posix_acl *get_posix_acl(struct inode *, int);
 extern int set_posix_acl(struct inode *, int, struct posix_acl *);
 
+#ifdef CONFIG_FS_POSIX_ACL
 static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
 {
 	struct posix_acl **p, *acl;
@@ -146,5 +147,14 @@ static inline void forget_cached_acl(struct inode *inode, int type)
 	if (old != ACL_NOT_CACHED)
 		posix_acl_release(old);
 }
+#endif
+
+static inline void cache_no_acl(struct inode *inode)
+{
+#ifdef CONFIG_FS_POSIX_ACL
+	inode->i_acl = NULL;
+	inode->i_default_acl = NULL;
+#endif
+}
 
 #endif	/* __LINUX_POSIX_ACL_H */
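The new cache_no_acl() helper records "this inode is known to have no ACLs" so later lookups skip the disk entirely, while get_cached_acl() returns ACL_NOT_CACHED on a miss and a referenced ACL (or NULL) on a hit. A minimal sketch of how a filesystem ties these together in its permission check, in the same shape as the reworked jfs_check_acl() above; myfs_read_acl() is a hypothetical stand-in for the filesystem's on-disk ACL reader (the real readers, like jfs_get_acl(), also install the result in the cache):

static int myfs_check_acl(struct inode *inode, int mask)
{
	struct posix_acl *acl = get_cached_acl(inode, ACL_TYPE_ACCESS);

	if (acl == ACL_NOT_CACHED) {
		/* Nothing cached yet: fall back to the on-disk lookup. */
		acl = myfs_read_acl(inode, ACL_TYPE_ACCESS);	/* hypothetical */
		if (IS_ERR(acl))
			return PTR_ERR(acl);
	}
	if (acl) {
		int error = posix_acl_permission(inode, acl, mask);
		posix_acl_release(acl);	/* drop the reference we were handed */
		return error;
	}
	return -EAGAIN;	/* no ACL: let the ordinary mode bits decide */
}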
diff --git a/kernel/futex.c b/kernel/futex.c
index 80b5ce716596..1c337112335c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -284,6 +284,25 @@ void put_futex_key(int fshared, union futex_key *key)
 	drop_futex_key_refs(key);
 }
 
+/*
+ * fault_in_user_writeable - fault in user address and verify RW access
+ * @uaddr:	pointer to faulting user space address
+ *
+ * Slow path to fixup the fault we just took in the atomic write
+ * access to @uaddr.
+ *
+ * We have no generic implementation of a non destructive write to the
+ * user address. We know that we faulted in the atomic pagefault
+ * disabled section so we can as well avoid the #PF overhead by
+ * calling get_user_pages() right away.
+ */
+static int fault_in_user_writeable(u32 __user *uaddr)
+{
+	int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
+				 sizeof(*uaddr), 1, 0, NULL, NULL);
+	return ret < 0 ? ret : 0;
+}
+
 /**
  * futex_top_waiter() - Return the highest priority waiter on a futex
  * @hb:	the hash bucket the futex_q's reside in
@@ -896,7 +915,6 @@ retry:
 retry_private:
 	op_ret = futex_atomic_op_inuser(op, uaddr2);
 	if (unlikely(op_ret < 0)) {
-		u32 dummy;
 
 		double_unlock_hb(hb1, hb2);
 
@@ -914,7 +932,7 @@ retry_private:
 			goto out_put_keys;
 		}
 
-		ret = get_user(dummy, uaddr2);
+		ret = fault_in_user_writeable(uaddr2);
 		if (ret)
 			goto out_put_keys;
 
@@ -1204,7 +1222,7 @@ retry_private:
 		double_unlock_hb(hb1, hb2);
 		put_futex_key(fshared, &key2);
 		put_futex_key(fshared, &key1);
-		ret = get_user(curval2, uaddr2);
+		ret = fault_in_user_writeable(uaddr2);
 		if (!ret)
 			goto retry;
 		goto out;
@@ -1482,7 +1500,7 @@ retry:
 handle_fault:
 	spin_unlock(q->lock_ptr);
 
-	ret = get_user(uval, uaddr);
+	ret = fault_in_user_writeable(uaddr);
 
 	spin_lock(q->lock_ptr);
 
@@ -1807,7 +1825,6 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct futex_hash_bucket *hb;
-	u32 uval;
 	struct futex_q q;
 	int res, ret;
 
@@ -1909,16 +1926,9 @@ out:
 	return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
 uaddr_faulted:
-	/*
-	 * We have to r/w *(int __user *)uaddr, and we have to modify it
-	 * atomically. Therefore, if we continue to fault after get_user()
-	 * below, we need to handle the fault ourselves, while still holding
-	 * the mmap_sem. This can occur if the uaddr is under contention as
-	 * we have to drop the mmap_sem in order to call get_user().
-	 */
 	queue_unlock(&q, hb);
 
-	ret = get_user(uval, uaddr);
+	ret = fault_in_user_writeable(uaddr);
 	if (ret)
 		goto out_put_key;
 
@@ -2013,17 +2023,10 @@ out:
 	return ret;
 
 pi_faulted:
-	/*
-	 * We have to r/w *(int __user *)uaddr, and we have to modify it
-	 * atomically. Therefore, if we continue to fault after get_user()
-	 * below, we need to handle the fault ourselves, while still holding
-	 * the mmap_sem. This can occur if the uaddr is under contention as
-	 * we have to drop the mmap_sem in order to call get_user().
-	 */
 	spin_unlock(&hb->lock);
 	put_futex_key(fshared, &key);
 
-	ret = get_user(uval, uaddr);
+	ret = fault_in_user_writeable(uaddr);
 	if (!ret)
 		goto retry;
 
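The rationale behind the new helper: get_user() only guarantees the page is readable, so when the futex word lives in a page that is mapped but write-protected, the atomic write keeps faulting and the old fixup sites could loop. fault_in_user_writeable() instead calls get_user_pages() with the write argument set, forcing a writable mapping (or a hard error). A hedged sketch of the retry shape the hunks above install at each former get_user() site; my_atomic_user_op() is a hypothetical stand-in for futex_atomic_op_inuser() and the other pagefault-disabled accessors:

static int my_locked_user_op(u32 __user *uaddr)
{
	int ret;

	for (;;) {
		/* Fast path: runs with pagefaults disabled, may see -EFAULT. */
		ret = my_atomic_user_op(uaddr);		/* hypothetical */
		if (ret != -EFAULT)
			return ret;
		/* Slow path: make the page present *and* writable, then retry. */
		ret = fault_in_user_writeable(uaddr);
		if (ret)
			return ret;	/* address is genuinely bad */
	}
}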
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aecc9cdfdfce..5d714f8fb303 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ again:
 		 * properly detect and handle allocation failures.
 		 *
 		 * We most definitely don't want callers attempting to
-		 * allocate greater than single-page units with
+		 * allocate greater than order-1 page units with
 		 * __GFP_NOFAIL.
 		 */
-		WARN_ON_ONCE(order > 0);
+		WARN_ON_ONCE(order > 1);
 	}
 	spin_lock_irqsave(&zone->lock, flags);
 	page = __rmqueue(zone, order, migratetype);
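The threshold moves from order > 0 to order > 1, presumably so that order-1 allocations which legitimately carry __GFP_NOFAIL (slab's minimum order can be 1 for larger caches; see the mm/slub.c hunk below) no longer trigger the warning, while anything bigger still does. For illustration, the relaxed check now quietly accepts a two-page must-not-fail request:

/* order-1 (two contiguous pages) with __GFP_NOFAIL: no longer warns */
struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 1);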
diff --git a/mm/shmem.c b/mm/shmem.c
index 5f2019fc7895..d713239ce2ce 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1558,6 +1558,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, int mode,
 		spin_lock_init(&info->lock);
 		info->flags = flags & VM_NORESERVE;
 		INIT_LIST_HEAD(&info->swaplist);
+		cache_no_acl(inode);
 
 		switch (mode & S_IFMT) {
 		default:
@@ -2379,10 +2380,6 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
 	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
 	if (!p)
 		return NULL;
-#ifdef CONFIG_TMPFS_POSIX_ACL
-	p->vfs_inode.i_acl = NULL;
-	p->vfs_inode.i_default_acl = NULL;
-#endif
 	return &p->vfs_inode;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index ce62b770e2fc..819f056b39c6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
+	gfp_t alloc_gfp;
 
 	flags |= s->allocflags;
 
-	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
-									oo);
+	/*
+	 * Let the initial higher-order allocation fail under memory pressure
+	 * so we fall-back to the minimum order allocation.
+	 */
+	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*
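The hunk separates what the caller demanded from what the opportunistic first attempt should use: the higher-order try drops __GFP_NOFAIL and adds __GFP_NOWARN | __GFP_NORETRY so it can fail fast and quietly under memory pressure, and only the minimum-order fallback keeps the caller's original semantics. A hedged sketch of that two-step strategy in isolation, using alloc_pages_node() in place of SLUB's alloc_slab_page() wrapper:

static struct page *alloc_with_fallback(gfp_t flags, int node,
					unsigned int hi_order,
					unsigned int min_order)
{
	/* First try: preferred high order, allowed to fail fast and silently. */
	gfp_t trial = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	struct page *page = alloc_pages_node(node, trial, hi_order);

	if (page)
		return page;
	/*
	 * Fallback: smallest usable order, with the caller's original flags
	 * (including __GFP_NOFAIL, if it was passed) restored.
	 */
	return alloc_pages_node(node, flags, min_order);
}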