 kernel/Makefile |  4
 kernel/futex.c  | 45
 mm/page_alloc.c |  4
 mm/slub.c       | 10
 4 files changed, 36 insertions(+), 27 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index da750010a6fc..780c8dcf4516 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -69,8 +69,8 @@ obj-$(CONFIG_IKCONFIG) += configs.o
 obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
 obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
 obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
-obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
-obj-$(CONFIG_AUDITSYSCALL) += auditsc.o audit_watch.o
+obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
+obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
 obj-$(CONFIG_GCOV_KERNEL) += gcov/
 obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 obj-$(CONFIG_KPROBES) += kprobes.o
diff --git a/kernel/futex.c b/kernel/futex.c
index 80b5ce716596..1c337112335c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -284,6 +284,25 @@ void put_futex_key(int fshared, union futex_key *key)
 	drop_futex_key_refs(key);
 }
 
+/*
+ * fault_in_user_writeable - fault in user address and verify RW access
+ * @uaddr:	pointer to faulting user space address
+ *
+ * Slow path to fixup the fault we just took in the atomic write
+ * access to @uaddr.
+ *
+ * We have no generic implementation of a non destructive write to the
+ * user address. We know that we faulted in the atomic pagefault
+ * disabled section so we can as well avoid the #PF overhead by
+ * calling get_user_pages() right away.
+ */
+static int fault_in_user_writeable(u32 __user *uaddr)
+{
+	int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
+				 sizeof(*uaddr), 1, 0, NULL, NULL);
+	return ret < 0 ? ret : 0;
+}
+
 /**
  * futex_top_waiter() - Return the highest priority waiter on a futex
  * @hb:	the hash bucket the futex_q's reside in
@@ -896,7 +915,6 @@ retry:
 retry_private:
 	op_ret = futex_atomic_op_inuser(op, uaddr2);
 	if (unlikely(op_ret < 0)) {
-		u32 dummy;
 
 		double_unlock_hb(hb1, hb2);
 
@@ -914,7 +932,7 @@ retry_private:
 			goto out_put_keys;
 		}
 
-		ret = get_user(dummy, uaddr2);
+		ret = fault_in_user_writeable(uaddr2);
 		if (ret)
 			goto out_put_keys;
 
@@ -1204,7 +1222,7 @@ retry_private:
 			double_unlock_hb(hb1, hb2);
 			put_futex_key(fshared, &key2);
 			put_futex_key(fshared, &key1);
-			ret = get_user(curval2, uaddr2);
+			ret = fault_in_user_writeable(uaddr2);
 			if (!ret)
 				goto retry;
 			goto out;
@@ -1482,7 +1500,7 @@ retry:
 handle_fault:
 	spin_unlock(q->lock_ptr);
 
-	ret = get_user(uval, uaddr);
+	ret = fault_in_user_writeable(uaddr);
 
 	spin_lock(q->lock_ptr);
 
@@ -1807,7 +1825,6 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct futex_hash_bucket *hb;
-	u32 uval;
 	struct futex_q q;
 	int res, ret;
 
@@ -1909,16 +1926,9 @@ out:
 	return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
 uaddr_faulted:
-	/*
-	 * We have to r/w *(int __user *)uaddr, and we have to modify it
-	 * atomically. Therefore, if we continue to fault after get_user()
-	 * below, we need to handle the fault ourselves, while still holding
-	 * the mmap_sem. This can occur if the uaddr is under contention as
-	 * we have to drop the mmap_sem in order to call get_user().
-	 */
 	queue_unlock(&q, hb);
 
-	ret = get_user(uval, uaddr);
+	ret = fault_in_user_writeable(uaddr);
 	if (ret)
 		goto out_put_key;
 
@@ -2013,17 +2023,10 @@ out:
 	return ret;
 
 pi_faulted:
-	/*
-	 * We have to r/w *(int __user *)uaddr, and we have to modify it
-	 * atomically. Therefore, if we continue to fault after get_user()
-	 * below, we need to handle the fault ourselves, while still holding
-	 * the mmap_sem. This can occur if the uaddr is under contention as
-	 * we have to drop the mmap_sem in order to call get_user().
-	 */
 	spin_unlock(&hb->lock);
 	put_futex_key(fshared, &key);
 
-	ret = get_user(uval, uaddr);
+	ret = fault_in_user_writeable(uaddr);
 	if (!ret)
 		goto retry;
 
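Note on the kernel/futex.c hunks: every converted call site follows the same unlock, fault-in, retry pattern. The sketch below is illustrative only (it is not code from the patch, the wrapper name is made up, and the locking is simplified); it assumes the fault_in_user_writeable() helper added above and the existing futex_atomic_op_inuser() primitive. The point is that get_user() only guarantees the page can be read, while the atomic futex operation needs write access, so the fault has to be resolved by asking for write permission.

/*
 * Illustrative sketch of the shared retry pattern (not from the patch).
 * futex_op_with_fault_retry() is a hypothetical wrapper standing in for
 * the real call sites in futex_wake_op(), futex_requeue(), etc.
 */
static int futex_op_with_fault_retry(int op, u32 __user *uaddr,
				     struct futex_hash_bucket *hb)
{
	int op_ret;

retry:
	spin_lock(&hb->lock);
	/* Runs with pagefaults disabled, so it may report -EFAULT. */
	op_ret = futex_atomic_op_inuser(op, uaddr);
	spin_unlock(&hb->lock);

	if (op_ret != -EFAULT)
		return op_ret;

	/*
	 * Fault the page in with *write* permission; a plain get_user()
	 * read would not break COW and could leave us faulting forever.
	 */
	if (fault_in_user_writeable(uaddr))
		return -EFAULT;		/* genuinely bad user address */

	goto retry;			/* page is now writable, try again */
}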
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aecc9cdfdfce..5d714f8fb303 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ again:
 			 * properly detect and handle allocation failures.
 			 *
 			 * We most definitely don't want callers attempting to
-			 * allocate greater than single-page units with
+			 * allocate greater than order-1 page units with
 			 * __GFP_NOFAIL.
 			 */
-			WARN_ON_ONCE(order > 0);
+			WARN_ON_ONCE(order > 1);
 		}
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
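Note on the mm/page_alloc.c hunk: the warning catches __GFP_NOFAIL users who request allocations the page allocator may never be able to satisfy; relaxing the threshold from order > 0 to order > 1 tolerates two-page requests while still flagging anything larger. The callers below are hypothetical, just to illustrate what does and does not trigger the one-time warning after this change:

	/* Hypothetical callers, for illustration only. */
	struct page *p0 = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 0); /* one page: fine */
	struct page *p1 = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 1); /* two pages: fine after this change */
	struct page *p2 = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 2); /* four pages: still warns once */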
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
+	gfp_t alloc_gfp;
 
 	flags |= s->allocflags;
 
-	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
-									oo);
+	/*
+	 * Let the initial higher-order allocation fail under memory pressure
+	 * so we fall-back to the minimum order allocation.
+	 */
+	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*
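Note on the mm/slub.c hunk: the first attempt uses the cache's preferred, possibly higher, order and is now allowed to fail quietly even for __GFP_NOFAIL callers, because a minimum-order fallback follows. The sketch below is a condensed, assumption-laden rendering of that pattern: the helper name is made up for this example, the logic actually lives inline in allocate_slab(), and the fallback is assumed to reuse the caller's original flags so __GFP_NOFAIL is still honoured at minimum order.

/*
 * Condensed sketch of the two-step allocation (not the exact slub code).
 * try_preferred_then_min_order() is a hypothetical name for illustration.
 */
static struct page *try_preferred_then_min_order(struct kmem_cache *s,
						 gfp_t flags, int node)
{
	/* First try the preferred (possibly high) order, but let it fail:
	 * suppress warnings and retries, and strip __GFP_NOFAIL. */
	gfp_t alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	struct page *page = alloc_slab_page(alloc_gfp, node, s->oo);

	if (unlikely(!page)) {
		/* Fall back to the minimum order with the caller's original
		 * flags, so __GFP_NOFAIL still applies here. */
		page = alloc_slab_page(flags, node, s->min);
	}
	return page;
}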
