Diffstat (limited to 'mm')
-rw-r--r--  mm/fremap.c          12
-rw-r--r--  mm/hugetlb.c          8
-rw-r--r--  mm/memory_hotplug.c   6
-rw-r--r--  mm/mlock.c           11
-rw-r--r--  mm/mmap.c             4
5 files changed, 21 insertions, 20 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
index 4723ac8d2fc2..87da3590c61e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -204,10 +204,8 @@ get_write_lock:
 		unsigned long addr;
 		struct file *file = get_file(vma->vm_file);
 
-		vm_flags = vma->vm_flags;
-		if (!(flags & MAP_NONBLOCK))
-			vm_flags |= VM_POPULATE;
-		addr = mmap_region(file, start, size, vm_flags, pgoff);
+		addr = mmap_region(file, start, size,
+				vma->vm_flags, pgoff);
 		fput(file);
 		if (IS_ERR_VALUE(addr)) {
 			err = addr;
@@ -226,12 +224,6 @@ get_write_lock:
 			mutex_unlock(&mapping->i_mmap_mutex);
 		}
 
-		if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
-			if (!has_write_lock)
-				goto get_write_lock;
-			vma->vm_flags |= VM_POPULATE;
-		}
-
 		if (vma->vm_flags & VM_LOCKED) {
 			/*
 			 * drop PG_Mlocked flag for over-mapped range
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a0be33bb199..ca9a7c6d7e97 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-	struct hstate *h = &default_hstate;
-	return h->nr_huge_pages * pages_per_huge_page(h);
+	struct hstate *h;
+	unsigned long nr_total_pages = 0;
+
+	for_each_hstate(h)
+		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+	return nr_total_pages;
 }
 
 static int hugetlb_acct_memory(struct hstate *h, long delta)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9597eec8239d..ee3765760818 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
-		if (zone->wait_table)
+		/*
+		 * wait_table may be allocated from boot memory,
+		 * here only free if it's allocated by vmalloc.
+		 */
+		if (is_vmalloc_addr(zone->wait_table))
 			vfree(zone->wait_table);
 	}
 
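
The guard above matters because zone->wait_table may have been handed out by the boot allocator rather than vmalloc(), and only vmalloc'd memory may be passed to vfree(). A minimal sketch of the same pattern, using a hypothetical helper name:

#include <linux/mm.h>		/* is_vmalloc_addr() */
#include <linux/vmalloc.h>	/* vfree() */
#include <linux/wait.h>		/* wait_queue_head_t */

/*
 * Hypothetical helper illustrating the guard used above: only memory
 * obtained from vmalloc() may be handed to vfree(); a wait_table that
 * came from the boot allocator must simply be left alone.
 */
static void release_wait_table(wait_queue_head_t *table)
{
	if (is_vmalloc_addr(table))
		vfree(table);
}
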
diff --git a/mm/mlock.c b/mm/mlock.c
index 1c5e33fce639..79b7cf7d1bca 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
 
 		newflags = vma->vm_flags & ~VM_LOCKED;
 		if (on)
-			newflags |= VM_LOCKED | VM_POPULATE;
+			newflags |= VM_LOCKED;
 
 		tmp = vma->vm_end;
 		if (tmp > end)
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 		 * range with the first VMA. Also, skip undesirable VMA types.
 		 */
 		nend = min(end, vma->vm_end);
-		if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
-						VM_POPULATE)
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 			continue;
 		if (nstart < vma->vm_start)
 			nstart = vma->vm_start;
@@ -492,9 +491,9 @@ static int do_mlockall(int flags)
 	struct vm_area_struct * vma, * prev = NULL;
 
 	if (flags & MCL_FUTURE)
-		current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
+		current->mm->def_flags |= VM_LOCKED;
 	else
-		current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
+		current->mm->def_flags &= ~VM_LOCKED;
 	if (flags == MCL_FUTURE)
 		goto out;
 
@@ -503,7 +502,7 @@ static int do_mlockall(int flags)
 
 		newflags = vma->vm_flags & ~VM_LOCKED;
 		if (flags & MCL_CURRENT)
-			newflags |= VM_LOCKED | VM_POPULATE;
+			newflags |= VM_LOCKED;
 
 		/* Ignore errors */
 		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
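
For context, do_mlockall() above sits behind the mlockall() system call: MCL_CURRENT applies VM_LOCKED to existing VMAs, and MCL_FUTURE sets it in mm->def_flags so later mappings inherit it; with this patch, population is driven by VM_LOCKED itself rather than a separate VM_POPULATE bit. A minimal userspace sketch (allocation size is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Lock existing mappings and mark future ones to be locked too;
	 * this may require CAP_IPC_LOCK or enough RLIMIT_MEMLOCK. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		perror("mlockall");
		return 1;
	}

	/* A large allocation like this is typically backed by a fresh
	 * mmap(), which picks up VM_LOCKED from mm->def_flags. */
	char *buf = malloc(1 << 20);	/* 1 MiB, illustrative size */
	if (!buf)
		return 1;
	memset(buf, 0, 1 << 20);

	munlockall();
	free(buf);
	return 0;
}
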
diff --git a/mm/mmap.c b/mm/mmap.c
index 2664a47cec93..6466699b16cb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	}
 
 	addr = mmap_region(file, addr, len, vm_flags, pgoff);
-	if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+	if (!IS_ERR_VALUE(addr) &&
+	    ((vm_flags & VM_LOCKED) ||
+	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
 		*populate = len;
 	return addr;
 }
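
Taken together, the new test in do_mmap_pgoff() requests up-front population when the mapping is VM_LOCKED, or when userspace passed MAP_POPULATE without MAP_NONBLOCK. A minimal userspace sketch that exercises that path (mapping size is illustrative):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;	/* 1 MiB, illustrative size */

	/* MAP_POPULATE (and no MAP_NONBLOCK) asks the kernel to fault the
	 * pages in before mmap() returns, which is the condition checked
	 * above when setting *populate. */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	p[0] = 1;	/* with MAP_POPULATE the pages should already be resident here */
	munmap(p, len);
	return 0;
}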