Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c     17
-rw-r--r--  mm/kmemleak.c    52
-rw-r--r--  mm/memory.c      34
-rw-r--r--  mm/nommu.c       12
-rw-r--r--  mm/page_alloc.c  10
-rw-r--r--  mm/shmem.c        6
-rw-r--r--  mm/shmem_acl.c   29
-rw-r--r--  mm/slub.c        10
-rw-r--r--  mm/thrash.c      32
-rw-r--r--  mm/vmscan.c       2
10 files changed, 91 insertions(+), 113 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a56e6f3ce979..d0351e31f474 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1985,7 +1985,7 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h,
 }
 
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, pte_t *ptep, int write_access)
+			unsigned long address, pte_t *ptep, unsigned int flags)
 {
 	struct hstate *h = hstate_vma(vma);
 	int ret = VM_FAULT_SIGBUS;
@@ -2053,7 +2053,7 @@ retry:
 	 * any allocations necessary to record that reservation occur outside
 	 * the spinlock.
 	 */
-	if (write_access && !(vma->vm_flags & VM_SHARED))
+	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
 		if (vma_needs_reservation(h, vma, address) < 0) {
 			ret = VM_FAULT_OOM;
 			goto backout_unlocked;
@@ -2072,7 +2072,7 @@ retry:
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
 
-	if (write_access && !(vma->vm_flags & VM_SHARED)) {
+	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
 	}
@@ -2091,7 +2091,7 @@ backout_unlocked:
 }
 
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access)
+			unsigned long address, unsigned int flags)
 {
 	pte_t *ptep;
 	pte_t entry;
@@ -2112,7 +2112,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	mutex_lock(&hugetlb_instantiation_mutex);
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
-		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
+		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
 		goto out_mutex;
 	}
 
@@ -2126,7 +2126,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * page now as it is used to determine if a reservation has been
 	 * consumed.
 	 */
-	if (write_access && !pte_write(entry)) {
+	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
 		if (vma_needs_reservation(h, vma, address) < 0) {
 			ret = VM_FAULT_OOM;
 			goto out_mutex;
@@ -2143,7 +2143,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_page_table_lock;
 
 
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		if (!pte_write(entry)) {
 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
 							pagecache_page);
@@ -2152,7 +2152,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+						flags & FAULT_FLAG_WRITE))
 		update_mmu_cache(vma, address, entry);
 
 out_page_table_lock:
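
The conversion pattern for this file: the write_access boolean that hugetlb_fault() and hugetlb_no_page() used to take becomes a fault-flags bitmask, and every truth test becomes a mask test. A minimal caller-side sketch, assuming only the FAULT_FLAG_WRITE bit visible in the hunks (the is_write variable is illustrative):

    /* Sketch: build the flags word where a write/read boolean used to be. */
    unsigned int flags = is_write ? FAULT_FLAG_WRITE : 0;

    ret = hugetlb_fault(mm, vma, address, flags);

    /* Inside the fault path, each old "if (write_access)" becomes: */
    if (flags & FAULT_FLAG_WRITE) {
            /* reservation and COW handling */
    }
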
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index ec759b60077a..c96f2c8700aa 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -61,6 +61,8 @@
  * structure.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -311,7 +313,7 @@ static int unreferenced_object(struct kmemleak_object *object)
 
 static void print_referenced(struct kmemleak_object *object)
 {
-	pr_info("kmemleak: referenced object 0x%08lx (size %zu)\n",
+	pr_info("referenced object 0x%08lx (size %zu)\n",
 		object->pointer, object->size);
 }
 
@@ -320,7 +322,7 @@ static void print_unreferenced(struct seq_file *seq,
 {
 	int i;
 
-	print_helper(seq, "kmemleak: unreferenced object 0x%08lx (size %zu):\n",
+	print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
 		     object->pointer, object->size);
 	print_helper(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
 		     object->comm, object->pid, object->jiffies);
@@ -344,7 +346,7 @@ static void dump_object_info(struct kmemleak_object *object)
 	trace.nr_entries = object->trace_len;
 	trace.entries = object->trace;
 
-	pr_notice("kmemleak: Object 0x%08lx (size %zu):\n",
+	pr_notice("Object 0x%08lx (size %zu):\n",
 		  object->tree_node.start, object->size);
 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 		  object->comm, object->pid, object->jiffies);
@@ -372,7 +374,7 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 		object = prio_tree_entry(node, struct kmemleak_object,
 					 tree_node);
 		if (!alias && object->pointer != ptr) {
-			kmemleak_warn("kmemleak: Found object by alias");
+			kmemleak_warn("Found object by alias");
 			object = NULL;
 		}
 	} else
@@ -467,8 +469,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
 
 	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
 	if (!object) {
-		kmemleak_stop("kmemleak: Cannot allocate a kmemleak_object "
-			      "structure\n");
+		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
 		return;
 	}
 
@@ -527,8 +528,8 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
 	if (node != &object->tree_node) {
 		unsigned long flags;
 
-		kmemleak_stop("kmemleak: Cannot insert 0x%lx into the object "
-			      "search tree (already existing)\n", ptr);
+		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
+			      "(already existing)\n", ptr);
 		object = lookup_object(ptr, 1);
 		spin_lock_irqsave(&object->lock, flags);
 		dump_object_info(object);
@@ -553,7 +554,7 @@ static void delete_object(unsigned long ptr)
 	write_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Freeing unknown object at 0x%08lx\n",
+		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 			      ptr);
 		write_unlock_irqrestore(&kmemleak_lock, flags);
 		return;
@@ -588,8 +589,7 @@ static void make_gray_object(unsigned long ptr)
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Graying unknown object at 0x%08lx\n",
-			      ptr);
+		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
 		return;
 	}
 
@@ -610,8 +610,7 @@ static void make_black_object(unsigned long ptr)
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Blacking unknown object at 0x%08lx\n",
-			      ptr);
+		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
 		return;
 	}
 
@@ -634,21 +633,20 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Adding scan area to unknown "
-			      "object at 0x%08lx\n", ptr);
+		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
+			      ptr);
 		return;
 	}
 
 	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
 	if (!area) {
-		kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
+		kmemleak_warn("Cannot allocate a scan area\n");
 		goto out;
 	}
 
 	spin_lock_irqsave(&object->lock, flags);
 	if (offset + length > object->size) {
-		kmemleak_warn("kmemleak: Scan area larger than object "
-			      "0x%08lx\n", ptr);
+		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 		dump_object_info(object);
 		kmem_cache_free(scan_area_cache, area);
 		goto out_unlock;
@@ -677,8 +675,7 @@ static void object_no_scan(unsigned long ptr)
 
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("kmemleak: Not scanning unknown object at "
-			      "0x%08lx\n", ptr);
+		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
 		return;
 	}
 
@@ -699,7 +696,7 @@ static void log_early(int op_type, const void *ptr, size_t size,
 	struct early_log *log;
 
 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
-		kmemleak_stop("kmemleak: Early log buffer exceeded\n");
+		kmemleak_stop("Early log buffer exceeded\n");
 		return;
 	}
 
@@ -966,7 +963,7 @@ static void kmemleak_scan(void)
 		 * 1 reference to any object at this point.
 		 */
 		if (atomic_read(&object->use_count) > 1) {
-			pr_debug("kmemleak: object->use_count = %d\n",
+			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
 			dump_object_info(object);
 		}
@@ -1062,7 +1059,7 @@ static int kmemleak_scan_thread(void *arg)
 {
 	static int first_run = 1;
 
-	pr_info("kmemleak: Automatic memory scanning thread started\n");
+	pr_info("Automatic memory scanning thread started\n");
 
 	/*
 	 * Wait before the first scan to allow the system to fully initialize.
@@ -1108,7 +1105,7 @@ static int kmemleak_scan_thread(void *arg)
 		timeout = schedule_timeout_interruptible(timeout);
 	}
 
-	pr_info("kmemleak: Automatic memory scanning thread ended\n");
+	pr_info("Automatic memory scanning thread ended\n");
 
 	return 0;
 }
@@ -1123,7 +1120,7 @@ void start_scan_thread(void)
 		return;
 	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
 	if (IS_ERR(scan_thread)) {
-		pr_warning("kmemleak: Failed to create the scan thread\n");
+		pr_warning("Failed to create the scan thread\n");
 		scan_thread = NULL;
 	}
 }
@@ -1367,7 +1364,7 @@ static void kmemleak_cleanup(void)
 	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
 				     "kmemleak-clean");
 	if (IS_ERR(cleanup_thread))
-		pr_warning("kmemleak: Failed to create the clean-up thread\n");
+		pr_warning("Failed to create the clean-up thread\n");
 }
 
 /*
@@ -1488,8 +1485,7 @@ static int __init kmemleak_late_init(void)
 	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
 				     &kmemleak_fops);
 	if (!dentry)
-		pr_warning("kmemleak: Failed to create the debugfs kmemleak "
-			   "file\n");
+		pr_warning("Failed to create the debugfs kmemleak file\n");
 	mutex_lock(&kmemleak_mutex);
 	start_scan_thread();
 	mutex_unlock(&kmemleak_mutex);
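
Every message edit in this file leans on the kernel's pr_fmt() hook: when a source file defines pr_fmt() before the printk machinery is included, the pr_info()/pr_notice()/pr_debug() macros prepend it to each format string, so the hand-written "kmemleak: " prefixes become redundant. A minimal sketch of the mechanism:

    /* Must be defined before any header that pulls in the pr_*() macros;
     * for mm/kmemleak.o, KBUILD_MODNAME expands to "kmemleak". */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>

    /* Prints "kmemleak: Early log buffer exceeded" - no literal prefix. */
    pr_info("Early log buffer exceeded\n");
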
diff --git a/mm/memory.c b/mm/memory.c
index 98bcb90d5957..65216194eb8d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1207,8 +1207,8 @@ static inline int use_zero_page(struct vm_area_struct *vma)
 
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
+		     unsigned long start, int nr_pages, int flags,
 		     struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
 	unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 	int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
-	if (len <= 0)
+	if (nr_pages <= 0)
 		return 0;
 	/*
 	 * Require read or write permissions.
@@ -1269,7 +1269,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				vmas[i] = gate_vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
+			nr_pages--;
 			continue;
 		}
 
@@ -1280,7 +1280,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 		if (is_vm_hugetlb_page(vma)) {
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
-						&start, &len, i, write);
+					&start, &nr_pages, i, write);
 			continue;
 		}
 
@@ -1311,8 +1311,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		while (!(page = follow_page(vma, start, foll_flags))) {
 			int ret;
 
-			/* FOLL_WRITE matches FAULT_FLAG_WRITE! */
-			ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
+			ret = handle_mm_fault(mm, vma, start,
+					(foll_flags & FOLL_WRITE) ?
+					FAULT_FLAG_WRITE : 0);
+
 			if (ret & VM_FAULT_ERROR) {
 				if (ret & VM_FAULT_OOM)
 					return i ? i : -ENOMEM;
@@ -1355,9 +1357,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				vmas[i] = vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
-		} while (len && start < vma->vm_end);
-	} while (len);
+			nr_pages--;
+		} while (nr_pages && start < vma->vm_end);
+	} while (nr_pages);
 	return i;
 }
 
@@ -1366,7 +1368,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * @tsk:	task_struct of target task
  * @mm:		mm_struct of target mm
  * @start:	starting user address
- * @len:	number of pages from start to pin
+ * @nr_pages:	number of pages from start to pin
  * @write:	whether pages will be written to by the caller
  * @force:	whether to force write access even if user mapping is
  *		readonly. This will result in the page being COWed even
@@ -1378,7 +1380,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  *		Or NULL if the caller does not require them.
  *
  * Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
@@ -1412,7 +1414,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 * See also get_user_pages_fast, for performance critical applications.
 */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int write, int force,
+		unsigned long start, int nr_pages, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -1422,9 +1424,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
 
-	return __get_user_pages(tsk, mm,
-				start, len, flags,
-				pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 
 EXPORT_SYMBOL(get_user_pages);
@@ -2517,7 +2517,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
-		grab_swap_token(); /* Contend for token _before_ read-in */
+		grab_swap_token(mm); /* Contend for token _before_ read-in */
 		page = swapin_readahead(entry,
 					GFP_HIGHUSER_MOVABLE, vma, address);
 		if (!page) {
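
Two independent cleanups meet here: the len parameter of the get_user_pages() family is renamed to nr_pages (it counts pages, not bytes), and handle_mm_fault() now takes an explicit fault-flags word, removing the fragile assumption that FOLL_WRITE and FAULT_FLAG_WRITE happen to share a bit value. A hedged usage sketch for the renamed interface, following the documentation block above (uaddr and the page count are illustrative):

    struct page *pages[4];
    int i, pinned;

    down_read(&current->mm->mmap_sem);
    pinned = get_user_pages(current, current->mm, uaddr,
                            4 /* nr_pages */, 1 /* write */, 0 /* force */,
                            pages, NULL);
    up_read(&current->mm->mmap_sem);

    /* May pin fewer pages than requested; each one must be released. */
    for (i = 0; i < pinned; i++)
            put_page(pages[i]);
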
diff --git a/mm/nommu.c b/mm/nommu.c
index 2fd2ad5da98e..bf0cc762a7d2 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -173,8 +173,8 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
+		     unsigned long start, int nr_pages, int flags,
 		     struct page **pages, struct vm_area_struct **vmas)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -189,7 +189,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
-	for (i = 0; i < len; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
 		if (!vma)
 			goto finish_or_fault;
@@ -224,7 +224,7 @@ finish_or_fault:
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-	unsigned long start, int len, int write, int force,
+	unsigned long start, int nr_pages, int write, int force,
 	struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -234,9 +234,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
 
-	return __get_user_pages(tsk, mm,
-				start, len, flags,
-				pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 EXPORT_SYMBOL(get_user_pages);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 30d5093a099d..5d714f8fb303 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ again:
 			 * properly detect and handle allocation failures.
 			 *
 			 * We most definitely don't want callers attempting to
-			 * allocate greater than single-page units with
+			 * allocate greater than order-1 page units with
 			 * __GFP_NOFAIL.
 			 */
-			WARN_ON_ONCE(order > 0);
+			WARN_ON_ONCE(order > 1);
 		}
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
@@ -3026,7 +3026,7 @@ bad:
 		if (dzone == zone)
 			break;
 		kfree(zone_pcp(dzone, cpu));
-		zone_pcp(dzone, cpu) = NULL;
+		zone_pcp(dzone, cpu) = &boot_pageset[cpu];
 	}
 	return -ENOMEM;
 }
@@ -3041,7 +3041,7 @@ static inline void free_zone_pagesets(int cpu)
 		/* Free per_cpu_pageset if it is slab allocated */
 		if (pset != &boot_pageset[cpu])
 			kfree(pset);
-		zone_pcp(zone, cpu) = NULL;
+		zone_pcp(zone, cpu) = &boot_pageset[cpu];
 	}
 }
 
@@ -4659,7 +4659,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
 	if (!write || (ret == -EINVAL))
 		return ret;
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		for_each_online_cpu(cpu) {
 			unsigned long high;
 			high = zone->present_pages / percpu_pagelist_fraction;
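
Three separate fixes in this file: the __GFP_NOFAIL sanity check moves from order > 0 to order > 1, tolerating order-1 must-succeed allocations; an offline or failed per-cpu pageset pointer is reset to the static boot_pageset instead of NULL, so later dereferences cannot oops; and the sysctl handler skips zones with no present pages. A sketch of what the relaxed warning permits:

    struct page *page;

    /* Order-1 is now tolerated for must-succeed allocations... */
    page = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 1);    /* no warning */

    /* ...but anything larger still trips WARN_ON_ONCE(order > 1). */
    page = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 2);    /* warns once */
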
diff --git a/mm/shmem.c b/mm/shmem.c
index e89d7ec18eda..d713239ce2ce 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1558,6 +1558,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, int mode,
 		spin_lock_init(&info->lock);
 		info->flags = flags & VM_NORESERVE;
 		INIT_LIST_HEAD(&info->swaplist);
+		cache_no_acl(inode);
 
 		switch (mode & S_IFMT) {
 		default:
@@ -2388,7 +2389,6 @@ static void shmem_destroy_inode(struct inode *inode)
 		/* only struct inode is valid if it's an inline symlink */
 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
 	}
-	shmem_acl_destroy_inode(inode);
 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
 }
 
@@ -2397,10 +2397,6 @@ static void init_once(void *foo)
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
 	inode_init_once(&p->vfs_inode);
-#ifdef CONFIG_TMPFS_POSIX_ACL
-	p->i_acl = NULL;
-	p->i_default_acl = NULL;
-#endif
 }
 
 static int init_inodecache(void)
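
shmem drops its private ACL pointers in favor of the generic i_acl/i_default_acl fields that now live in struct inode; cache_no_acl() marks a freshly built inode as carrying no ACLs at all. A sketch of that helper as this series defines it in <linux/fs.h> (reproduced from memory, so treat the exact body as an assumption):

    /* NULL here means "definitely no ACL"; the separate ACL_NOT_CACHED
     * sentinel means "not looked up yet". */
    static inline void cache_no_acl(struct inode *inode)
    {
            inode->i_acl = NULL;
            inode->i_default_acl = NULL;
    }
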
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index 8e5aadd7dcd6..606a8e757a42 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -22,11 +22,11 @@ shmem_get_acl(struct inode *inode, int type)
 	spin_lock(&inode->i_lock);
 	switch(type) {
 	case ACL_TYPE_ACCESS:
-		acl = posix_acl_dup(SHMEM_I(inode)->i_acl);
+		acl = posix_acl_dup(inode->i_acl);
 		break;
 
 	case ACL_TYPE_DEFAULT:
-		acl = posix_acl_dup(SHMEM_I(inode)->i_default_acl);
+		acl = posix_acl_dup(inode->i_default_acl);
 		break;
 	}
 	spin_unlock(&inode->i_lock);
@@ -45,13 +45,13 @@ shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
 	spin_lock(&inode->i_lock);
 	switch(type) {
 	case ACL_TYPE_ACCESS:
-		free = SHMEM_I(inode)->i_acl;
-		SHMEM_I(inode)->i_acl = posix_acl_dup(acl);
+		free = inode->i_acl;
+		inode->i_acl = posix_acl_dup(acl);
 		break;
 
 	case ACL_TYPE_DEFAULT:
-		free = SHMEM_I(inode)->i_default_acl;
-		SHMEM_I(inode)->i_default_acl = posix_acl_dup(acl);
+		free = inode->i_default_acl;
+		inode->i_default_acl = posix_acl_dup(acl);
 		break;
 	}
 	spin_unlock(&inode->i_lock);
@@ -155,23 +155,6 @@ shmem_acl_init(struct inode *inode, struct inode *dir)
 }
 
 /**
- * shmem_acl_destroy_inode - destroy acls hanging off the in-memory inode
- *
- * This is done before destroying the actual inode.
- */
-
-void
-shmem_acl_destroy_inode(struct inode *inode)
-{
-	if (SHMEM_I(inode)->i_acl)
-		posix_acl_release(SHMEM_I(inode)->i_acl);
-	SHMEM_I(inode)->i_acl = NULL;
-	if (SHMEM_I(inode)->i_default_acl)
-		posix_acl_release(SHMEM_I(inode)->i_default_acl);
-	SHMEM_I(inode)->i_default_acl = NULL;
-}
-
-/**
  * shmem_check_acl - check_acl() callback for generic_permission()
  */
 static int
diff --git a/mm/slub.c b/mm/slub.c
index ce62b770e2fc..819f056b39c6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
+	gfp_t alloc_gfp;
 
 	flags |= s->allocflags;
 
-	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
-									oo);
+	/*
+	 * Let the initial higher-order allocation fail under memory pressure
+	 * so we fall-back to the minimum order allocation.
+	 */
+	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*
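
The higher-order attempt is opportunistic and must be allowed to fail, otherwise the minimum-order fallback just below the hunk could never run; stripping __GFP_NOFAIL (alongside __GFP_NOWARN | __GFP_NORETRY) applies only to that first attempt. The overall shape, sketched with the function's own identifiers:

    /* First try: preferred (higher) order, quiet failure allowed. */
    alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
    page = alloc_slab_page(alloc_gfp, node, oo);
    if (unlikely(!page)) {
            /* Fall back to the minimum order with the caller's original
             * flags - including __GFP_NOFAIL if it was set. */
            oo = s->min;
            page = alloc_slab_page(flags, node, oo);
    }
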
diff --git a/mm/thrash.c b/mm/thrash.c
index c4c5205a9c35..2372d4ed5dd8 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -26,47 +26,45 @@ static DEFINE_SPINLOCK(swap_token_lock);
 struct mm_struct *swap_token_mm;
 static unsigned int global_faults;
 
-void grab_swap_token(void)
+void grab_swap_token(struct mm_struct *mm)
 {
 	int current_interval;
 
 	global_faults++;
 
-	current_interval = global_faults - current->mm->faultstamp;
+	current_interval = global_faults - mm->faultstamp;
 
 	if (!spin_trylock(&swap_token_lock))
 		return;
 
 	/* First come first served */
 	if (swap_token_mm == NULL) {
-		current->mm->token_priority = current->mm->token_priority + 2;
-		swap_token_mm = current->mm;
+		mm->token_priority = mm->token_priority + 2;
+		swap_token_mm = mm;
 		goto out;
 	}
 
-	if (current->mm != swap_token_mm) {
-		if (current_interval < current->mm->last_interval)
-			current->mm->token_priority++;
+	if (mm != swap_token_mm) {
+		if (current_interval < mm->last_interval)
+			mm->token_priority++;
 		else {
-			if (likely(current->mm->token_priority > 0))
-				current->mm->token_priority--;
+			if (likely(mm->token_priority > 0))
+				mm->token_priority--;
 		}
 		/* Check if we deserve the token */
-		if (current->mm->token_priority >
-				swap_token_mm->token_priority) {
-			current->mm->token_priority += 2;
-			swap_token_mm = current->mm;
+		if (mm->token_priority > swap_token_mm->token_priority) {
+			mm->token_priority += 2;
+			swap_token_mm = mm;
 		}
 	} else {
 		/* Token holder came in again! */
-		current->mm->token_priority += 2;
+		mm->token_priority += 2;
 	}
 
 out:
-	current->mm->faultstamp = global_faults;
-	current->mm->last_interval = current_interval;
+	mm->faultstamp = global_faults;
+	mm->last_interval = current_interval;
 	spin_unlock(&swap_token_lock);
-	return;
 }
 
 /* Called on process exit. */
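
grab_swap_token() now receives the faulting mm explicitly rather than reaching for current->mm, matching the do_swap_page() hunk in mm/memory.c above; the swap-token heuristic then works even when a fault is serviced on behalf of another process. The prototype presumably changes in the header to match (sketch):

    /* Declaration, as it would now read (e.g. in <linux/swap.h>): */
    extern void grab_swap_token(struct mm_struct *mm);

    /* do_swap_page() contends for the token before the swap read-in: */
    grab_swap_token(mm);
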
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e8fa2d9eb212..54155268dfca 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -932,7 +932,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 				continue;
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
 				list_move(&cursor_page->lru, dst);
-				mem_cgroup_del_lru(page);
+				mem_cgroup_del_lru(cursor_page);
 				nr_taken++;
 				scan++;
 			}