Diffstat (limited to 'mm')
-rw-r--r--   mm/hugetlb.c          3
-rw-r--r--   mm/ksm.c              7
-rw-r--r--   mm/memory-failure.c   8
-rw-r--r--   mm/memory_hotplug.c  31
-rw-r--r--   mm/mempolicy.c        3
-rw-r--r--   mm/page_alloc.c      19
-rw-r--r--   mm/slub.c             4
-rw-r--r--   mm/vmalloc.c         28
-rw-r--r--   mm/vmstat.c           4
9 files changed, 72 insertions(+), 35 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c4a3558589ab..85855240933d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2738,7 +2738,8 @@ out_page_table_lock:
                 unlock_page(pagecache_page);
                 put_page(pagecache_page);
         }
-        unlock_page(page);
+        if (page != pagecache_page)
+                unlock_page(page);
 
 out_mutex:
         mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/ksm.c b/mm/ksm.c
index 65ab5c7067d9..43bc893470b4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1724,8 +1724,13 @@ static int ksm_memory_callback(struct notifier_block *self,
                 /*
                  * Keep it very simple for now: just lock out ksmd and
                  * MADV_UNMERGEABLE while any memory is going offline.
+                 * mutex_lock_nested() is necessary because lockdep was alarmed
+                 * that here we take ksm_thread_mutex inside notifier chain
+                 * mutex, and later take notifier chain mutex inside
+                 * ksm_thread_mutex to unlock it.  But that's safe because both
+                 * are inside mem_hotplug_mutex.
                  */
-                mutex_lock(&ksm_thread_mutex);
+                mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING);
                 break;
 
         case MEM_OFFLINE:
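
The comment added above describes a lockdep false positive rather than a real deadlock. A minimal sketch of the annotation pattern, with assumed names (outer_mutex and inner_mutex below are illustrative stand-ins for the notifier chain mutex and ksm_thread_mutex, not real kernel symbols; mutex_lock_nested() and SINGLE_DEPTH_NESTING are the real interfaces):

static DEFINE_MUTEX(outer_mutex);        /* e.g. the notifier chain mutex */
static DEFINE_MUTEX(inner_mutex);        /* e.g. ksm_thread_mutex */

/* Path that nests inner under outer, as in MEM_GOING_OFFLINE above. */
static void take_inner_under_outer(void)
{
        mutex_lock(&outer_mutex);
        /*
         * A plain mutex_lock() here would let lockdep pair this ordering
         * with the reverse ordering taken elsewhere and report a possible
         * ABBA deadlock.  SINGLE_DEPTH_NESTING places this acquisition in
         * its own lockdep subclass, so the externally-serialized order is
         * accepted without a splat.
         */
        mutex_lock_nested(&inner_mutex, SINGLE_DEPTH_NESTING);

        mutex_unlock(&inner_mutex);
        mutex_unlock(&outer_mutex);
}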
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 124324134ff6..46ab2c044b0e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -51,6 +51,7 @@
 #include <linux/slab.h>
 #include <linux/swapops.h>
 #include <linux/hugetlb.h>
+#include <linux/memory_hotplug.h>
 #include "internal.h"
 
 int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -1230,11 +1231,10 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
                 return 1;
 
         /*
-         * The lock_system_sleep prevents a race with memory hotplug,
-         * because the isolation assumes there's only a single user.
+         * The lock_memory_hotplug prevents a race with memory hotplug.
          * This is a big hammer, a better would be nicer.
          */
-        lock_system_sleep();
+        lock_memory_hotplug();
 
         /*
          * Isolate the page, so that it doesn't get reallocated if it
@@ -1264,7 +1264,7 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
                 ret = 1;
         }
         unset_migratetype_isolate(p);
-        unlock_system_sleep();
+        unlock_memory_hotplug();
         return ret;
 }
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9260314a221e..2c6523af5473 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -34,6 +34,23 @@
 
 #include "internal.h"
 
+DEFINE_MUTEX(mem_hotplug_mutex);
+
+void lock_memory_hotplug(void)
+{
+        mutex_lock(&mem_hotplug_mutex);
+
+        /* for exclusive hibernation if CONFIG_HIBERNATION=y */
+        lock_system_sleep();
+}
+
+void unlock_memory_hotplug(void)
+{
+        unlock_system_sleep();
+        mutex_unlock(&mem_hotplug_mutex);
+}
+
+
 /* add this memory to iomem resource */
 static struct resource *register_memory_resource(u64 start, u64 size)
 {
@@ -493,7 +510,7 @@ int mem_online_node(int nid)
         pg_data_t *pgdat;
         int ret;
 
-        lock_system_sleep();
+        lock_memory_hotplug();
         pgdat = hotadd_new_pgdat(nid, 0);
         if (pgdat) {
                 ret = -ENOMEM;
@@ -504,7 +521,7 @@ int mem_online_node(int nid)
         BUG_ON(ret);
 
 out:
-        unlock_system_sleep();
+        unlock_memory_hotplug();
         return ret;
 }
 
@@ -516,7 +533,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
         struct resource *res;
         int ret;
 
-        lock_system_sleep();
+        lock_memory_hotplug();
 
         res = register_memory_resource(start, size);
         ret = -EEXIST;
@@ -563,7 +580,7 @@ error:
         release_memory_resource(res);
 
 out:
-        unlock_system_sleep();
+        unlock_memory_hotplug();
         return ret;
 }
 EXPORT_SYMBOL_GPL(add_memory);
@@ -791,7 +808,7 @@ static int offline_pages(unsigned long start_pfn,
         if (!test_pages_in_a_zone(start_pfn, end_pfn))
                 return -EINVAL;
 
-        lock_system_sleep();
+        lock_memory_hotplug();
 
         zone = page_zone(pfn_to_page(start_pfn));
         node = zone_to_nid(zone);
@@ -880,7 +897,7 @@ repeat:
         writeback_set_ratelimit();
 
         memory_notify(MEM_OFFLINE, &arg);
-        unlock_system_sleep();
+        unlock_memory_hotplug();
         return 0;
 
 failed_removal:
@@ -891,7 +908,7 @@ failed_removal:
         undo_isolate_page_range(start_pfn, end_pfn);
 
 out:
-        unlock_system_sleep();
+        unlock_memory_hotplug();
         return ret;
 }
 
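
The new lock_memory_hotplug()/unlock_memory_hotplug() pair defined above is what mm/memory-failure.c now calls through its added #include <linux/memory_hotplug.h>; the header change itself lies outside the mm/-limited diffstat shown here. A sketch of what those declarations are assumed to look like, including no-op stubs for !CONFIG_MEMORY_HOTPLUG builds (an assumption for illustration, not part of this diff):

#ifdef CONFIG_MEMORY_HOTPLUG
extern void lock_memory_hotplug(void);
extern void unlock_memory_hotplug(void);
#else
static inline void lock_memory_hotplug(void) {}
static inline void unlock_memory_hotplug(void) {}
#endif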
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4a57f135b76e..11ff260fb282 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1307,15 +1307,18 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
                 goto out;
 
         /* Find the mm_struct */
+        rcu_read_lock();
         read_lock(&tasklist_lock);
         task = pid ? find_task_by_vpid(pid) : current;
         if (!task) {
                 read_unlock(&tasklist_lock);
+                rcu_read_unlock();
                 err = -ESRCH;
                 goto out;
         }
         mm = get_task_mm(task);
         read_unlock(&tasklist_lock);
+        rcu_read_unlock();
 
         err = -EINVAL;
         if (!mm)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e4092704c1a9..ff7e15872398 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -104,19 +104,24 @@ gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
  * only be modified with pm_mutex held, unless the suspend/hibernate code is
  * guaranteed not to run in parallel with that modification).
  */
-void set_gfp_allowed_mask(gfp_t mask)
+
+static gfp_t saved_gfp_mask;
+
+void pm_restore_gfp_mask(void)
 {
         WARN_ON(!mutex_is_locked(&pm_mutex));
-        gfp_allowed_mask = mask;
+        if (saved_gfp_mask) {
+                gfp_allowed_mask = saved_gfp_mask;
+                saved_gfp_mask = 0;
+        }
 }
 
-gfp_t clear_gfp_allowed_mask(gfp_t mask)
+void pm_restrict_gfp_mask(void)
 {
-        gfp_t ret = gfp_allowed_mask;
-
         WARN_ON(!mutex_is_locked(&pm_mutex));
-        gfp_allowed_mask &= ~mask;
-        return ret;
+        WARN_ON(saved_gfp_mask);
+        saved_gfp_mask = gfp_allowed_mask;
+        gfp_allowed_mask &= ~GFP_IOFS;
 }
 #endif /* CONFIG_PM_SLEEP */
 
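
The old set_gfp_allowed_mask()/clear_gfp_allowed_mask() pair made the caller carry the saved mask around; the replacement keeps it in the static saved_gfp_mask above. A hypothetical caller sketch, assuming the suspend/hibernate path that already holds pm_mutex is the intended user (hibernate_write_image() below is a placeholder name, not from this diff):

static int hibernate_snapshot_section(void)
{
        int error;

        /* Save gfp_allowed_mask, then clear GFP_IOFS (__GFP_IO | __GFP_FS). */
        pm_restrict_gfp_mask();

        /* Placeholder for the work done while I/O-backed allocations are disallowed. */
        error = hibernate_write_image();

        /* Put the saved mask back exactly once. */
        pm_restore_gfp_mask();

        return error;
}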
diff --git a/mm/slub.c b/mm/slub.c
index 981fb730aa04..bec0e355fbad 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3401,13 +3401,13 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 
         for_each_free_object(p, s, page->freelist) {
                 set_bit(slab_index(p, s, addr), map);
-                if (!check_object(s, page, p, 0))
+                if (!check_object(s, page, p, SLUB_RED_INACTIVE))
                         return 0;
         }
 
         for_each_object(p, s, addr, page->objects)
                 if (!test_bit(slab_index(p, s, addr), map))
-                        if (!check_object(s, page, p, 1))
+                        if (!check_object(s, page, p, SLUB_RED_ACTIVE))
                                 return 0;
         return 1;
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a3d66b3dc5cb..eb5cc7d00c5a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -31,8 +31,6 @@
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
-bool vmap_lazy_unmap __read_mostly = true;
-
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -503,9 +501,6 @@ static unsigned long lazy_max_pages(void)
 {
         unsigned int log;
 
-        if (!vmap_lazy_unmap)
-                return 0;
-
         log = fls(num_online_cpus());
 
         return log * (32UL * 1024 * 1024 / PAGE_SIZE);
@@ -566,7 +561,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
                         if (va->va_end > *end)
                                 *end = va->va_end;
                         nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
-                        unmap_vmap_area(va);
                         list_add_tail(&va->purge_list, &valist);
                         va->flags |= VM_LAZY_FREEING;
                         va->flags &= ~VM_LAZY_FREE;
@@ -611,10 +605,11 @@ static void purge_vmap_area_lazy(void)
 }
 
 /*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
+ * Free a vmap area, caller ensuring that the area has been unmapped
+ * and flush_cache_vunmap had been called for the correct range
+ * previously.
  */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+static void free_vmap_area_noflush(struct vmap_area *va)
 {
         va->flags |= VM_LAZY_FREE;
         atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
@@ -623,6 +618,16 @@ static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 }
 
 /*
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
+ */
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+{
+        unmap_vmap_area(va);
+        free_vmap_area_noflush(va);
+}
+
+/*
  * Free and unmap a vmap area
  */
 static void free_unmap_vmap_area(struct vmap_area *va)
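
For context, the unchanged wrapper named in the last context line is assumed to read roughly as below (its body is not part of this hunk): flush_cache_vunmap() runs first, which is why the "_noflush" variants above are allowed to skip it.

static void free_unmap_vmap_area(struct vmap_area *va)
{
        flush_cache_vunmap(va->va_start, va->va_end);
        free_unmap_vmap_area_noflush(va);
}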
@@ -798,7 +803,7 @@ static void free_vmap_block(struct vmap_block *vb)
         spin_unlock(&vmap_block_tree_lock);
         BUG_ON(tmp != vb);
 
-        free_unmap_vmap_area_noflush(vb->va);
+        free_vmap_area_noflush(vb->va);
         call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -936,6 +941,8 @@ static void vb_free(const void *addr, unsigned long size)
         rcu_read_unlock();
         BUG_ON(!vb);
 
+        vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
+
         spin_lock(&vb->lock);
         BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 
@@ -988,7 +995,6 @@ void vm_unmap_aliases(void)
 
                         s = vb->va->va_start + (i << PAGE_SHIFT);
                         e = vb->va->va_start + (j << PAGE_SHIFT);
-                        vunmap_page_range(s, e);
                         flush = 1;
 
                         if (s < start)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 42eac4d33216..8f62f17ee1c7 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -750,8 +750,6 @@ static const char * const vmstat_text[] = {
750 "nr_shmem", 750 "nr_shmem",
751 "nr_dirtied", 751 "nr_dirtied",
752 "nr_written", 752 "nr_written",
753 "nr_dirty_threshold",
754 "nr_dirty_background_threshold",
755 753
756#ifdef CONFIG_NUMA 754#ifdef CONFIG_NUMA
757 "numa_hit", 755 "numa_hit",
@@ -761,6 +759,8 @@ static const char * const vmstat_text[] = {
761 "numa_local", 759 "numa_local",
762 "numa_other", 760 "numa_other",
763#endif 761#endif
762 "nr_dirty_threshold",
763 "nr_dirty_background_threshold",
764 764
765#ifdef CONFIG_VM_EVENT_COUNTERS 765#ifdef CONFIG_VM_EVENT_COUNTERS
766 "pgpgin", 766 "pgpgin",