Diffstat (limited to 'mm')

 mm/fadvise.c  | 20 --------------------
 mm/hugetlb.c  |  6 ++++--
 mm/memory.c   |  2 +-
 mm/swapfile.c | 14 ++++++++++----
 4 files changed, 15 insertions, 27 deletions
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 907c39257ca0..0a03357a1f8e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -35,17 +35,6 @@
  *
  * LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
  *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE: push all of the currently
- * dirty pages at the disk.
- *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE, LINUX_FADV_WRITE_WAIT: push
- * all of the currently dirty pages at the disk, wait until they have been
- * written.
- *
- * It should be noted that none of these operations write out the file's
- * metadata.  So unless the application is strictly performing overwrites of
- * already-instantiated disk blocks, there are no guarantees here that the data
- * will be available after a crash.
  */
 asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 {
@@ -129,15 +118,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 			invalidate_mapping_pages(mapping, start_index,
 						end_index);
 		break;
-	case LINUX_FADV_ASYNC_WRITE:
-		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
-						 WB_SYNC_NONE);
-		break;
-	case LINUX_FADV_WRITE_WAIT:
-		ret = wait_on_page_writeback_range(mapping,
-				offset >> PAGE_CACHE_SHIFT,
-				endbyte >> PAGE_CACHE_SHIFT);
-		break;
 	default:
 		ret = -EINVAL;
 	}
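
Note on the fadvise.c hunks: with both cases gone, the two Linux-private advice
values fall through to the -EINVAL default. The deleted header comment described
how userspace was meant to combine them; the sketch below reconstructs that
flush-then-wait sequence. It is a hedged illustration only: the LINUX_FADV_*
values (32/33) are assumed from the series that introduced them, and a kernel
carrying this removal fails these calls with EINVAL.

#include <sys/types.h>
#include <fcntl.h>

/* Assumed values from the original patch series; not in POSIX, and
 * rejected with EINVAL once this removal is applied. */
#define LINUX_FADV_ASYNC_WRITE	32
#define LINUX_FADV_WRITE_WAIT	33

/* Push all currently dirty pages of [offset, offset+len) to disk and wait
 * for them: WRITE_WAIT, ASYNC_WRITE, WRITE_WAIT, per the deleted comment.
 * No file metadata is written, so this only protects overwrites of
 * already-instantiated disk blocks. */
static int flush_and_wait(int fd, off_t offset, off_t len)
{
	int ret;

	ret = posix_fadvise(fd, offset, len, LINUX_FADV_WRITE_WAIT);
	if (ret == 0)
		ret = posix_fadvise(fd, offset, len, LINUX_FADV_ASYNC_WRITE);
	if (ret == 0)
		ret = posix_fadvise(fd, offset, len, LINUX_FADV_WRITE_WAIT);
	return ret;	/* 0 on success, else a positive errno value */
}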
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ebad6bbb3501..832f676ca038 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -334,6 +334,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
 		return nr_huge_pages;
 
 	spin_lock(&hugetlb_lock);
+	count = max(count, reserved_huge_pages);
 	try_to_free_low(count);
 	while (count < nr_huge_pages) {
 		struct page *page = dequeue_huge_page(NULL, 0);
@@ -697,9 +698,10 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
 		page = pte_page(*pte);
 same_page:
-		get_page(page);
-		if (pages)
+		if (pages) {
+			get_page(page);
 			pages[i] = page + pfn_offset;
+		}
 
 		if (vmas)
 			vmas[i] = vma;
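
Note on the hugetlb.c hunks: the first clamps the requested pool size so that
writing a small value to /proc/sys/vm/nr_hugepages cannot shrink the pool below
outstanding reservations; the second takes a page reference only when the
caller supplied a pages[] array, since a pages == NULL caller gets no handle on
which to call put_page() and the unconditional get_page() pinned the huge page
for good. A hedged sketch of the resulting caller contract follows; the
parameter order beyond the two visible in the hunk header is assumed from this
era's prototype.

/* Caller that collects pages: each pages[i] is returned with an elevated
 * refcount which the caller must drop. */
got = follow_hugetlb_page(mm, vma, pages, NULL, &vaddr, &remaining, 0);
for (i = 0; i < got; i++) {
	/* ... use pages[i] ... */
	put_page(pages[i]);		/* balance the get_page() above */
}

/* vmas-only caller: after this hunk the walk takes no page references,
 * so nothing is pinned and nothing needs releasing. */
got = follow_hugetlb_page(mm, vma, NULL, vmas, &vaddr, &remaining, 0);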
diff --git a/mm/memory.c b/mm/memory.c
index 8d8f52569f32..0ec7bc644271 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -87,7 +87,7 @@ int randomize_va_space __read_mostly = 1;
 static int __init disable_randmaps(char *s)
 {
 	randomize_va_space = 0;
-	return 0;
+	return 1;
 }
 __setup("norandmaps", disable_randmaps);
 
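
Note on the memory.c hunk: this is the __setup() return convention. A handler
returns 1 to tell the boot-option parser the parameter was consumed; returning
0 leaves "norandmaps" to be treated as an unknown option and passed to init
through argv/envp. The same shape, with a hypothetical option name:

static int __init example_setup(char *str)	/* "example" is made up */
{
	/* ... parse str ... */
	return 1;	/* consumed; do not pass "example=..." on to init */
}
__setup("example=", example_setup);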
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 39aa9d129612..e5fd5385f0cc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -397,18 +397,24 @@ void free_swap_and_cache(swp_entry_t entry)
 
 	p = swap_info_get(entry);
 	if (p) {
-		if (swap_entry_free(p, swp_offset(entry)) == 1)
-			page = find_trylock_page(&swapper_space, entry.val);
+		if (swap_entry_free(p, swp_offset(entry)) == 1) {
+			page = find_get_page(&swapper_space, entry.val);
+			if (page && unlikely(TestSetPageLocked(page))) {
+				page_cache_release(page);
+				page = NULL;
+			}
+		}
 		spin_unlock(&swap_lock);
 	}
 	if (page) {
 		int one_user;
 
 		BUG_ON(PagePrivate(page));
-		page_cache_get(page);
 		one_user = (page_count(page) == 2);
 		/* Only cache user (+us), or swap space full? Free it! */
-		if (!PageWriteback(page) && (one_user || vm_swap_full())) {
+		/* Also recheck PageSwapCache after page is locked (above) */
+		if (PageSwapCache(page) && !PageWriteback(page) &&
+					(one_user || vm_swap_full())) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
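
Note on the swapfile.c hunk: find_trylock_page() (scheduled for removal) is
replaced by the take-a-reference-then-trylock idiom. Two knock-on effects are
visible in the hunk: the explicit page_cache_get() disappears because
find_get_page() already returns with a reference held, so
one_user == (page_count(page) == 2) still means "swap cache plus us"; and
PageSwapCache() must be rechecked because the page may leave the swap cache
between find_get_page() and the moment the lock is won. The idiom in
isolation, as a hedged sketch:

struct page *page;

page = find_get_page(&swapper_space, entry.val);	/* ref held, unlocked */
if (page && TestSetPageLocked(page)) {
	page_cache_release(page);	/* trylock lost: back off */
	page = NULL;
}
if (page) {
	if (PageSwapCache(page))	/* recheck now that it is locked */
		/* ... safe to operate on the swap-cache page ... */;
	unlock_page(page);
	page_cache_release(page);
}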