Diffstat (limited to 'mm')
 mm/memory-failure.c |  9 +++++----
 mm/mempolicy.c      |  2 --
 mm/msync.c          |  3 ++-
 mm/page_alloc.c     | 16 ++++++++++++++--
 mm/shmem.c          | 15 ++++++++++-----
 mm/slub.c           |  6 +++---
 6 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index cd8989c1027e..c6399e328931 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -895,7 +895,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	struct page *hpage = *hpagep;
 	struct page *ppage;
 
-	if (PageReserved(p) || PageSlab(p))
+	if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
 		return SWAP_SUCCESS;
 
 	/*
@@ -1159,9 +1159,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 			action_result(pfn, "free buddy, 2nd try", DELAYED);
 			return 0;
 		}
-		action_result(pfn, "non LRU", IGNORED);
-		put_page(p);
-		return -EBUSY;
 	}
 }
 
@@ -1194,6 +1191,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 		return 0;
 	}
 
+	if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
+		goto identify_page_state;
+
 	/*
 	 * For error on the tail page, we should set PG_hwpoison
 	 * on the head page to show that the hugepage is hwpoisoned
@@ -1243,6 +1243,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 		goto out;
 	}
 
+identify_page_state:
 	res = -EBUSY;
 	/*
 	 * The first check uses the current page flags which may not have any
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eb58de19f815..8f5330d74f47 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2139,7 +2139,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
 	} else
 		*new = *old;
 
-	rcu_read_lock();
 	if (current_cpuset_is_being_rebound()) {
 		nodemask_t mems = cpuset_mems_allowed(current);
 		if (new->flags & MPOL_F_REBINDING)
@@ -2147,7 +2146,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
 		else
 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
 	}
-	rcu_read_unlock();
 	atomic_set(&new->refcnt, 1);
 	return new;
 }
diff --git a/mm/msync.c b/mm/msync.c
index a5c673669ca6..992a1673d488 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -78,7 +78,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
 			goto out_unlock;
 		}
 		file = vma->vm_file;
-		fstart = start + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+		fstart = (start - vma->vm_start) +
+			((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 		fend = fstart + (min(end, vma->vm_end) - start) - 1;
 		start = vma->vm_end;
 		if ((flags & MS_SYNC) && file &&
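
The corrected offset arithmetic can be checked in isolation. Below is a minimal userspace sketch (not kernel code); the VMA values are invented for illustration and only the variable names mirror the patch. The old formula folds the absolute userspace address into the file offset, while the new one uses only the distance into the mapping.

/* Standalone sketch of the msync fstart fix; the example values are made up. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t vm_start = 0x7f0000002000ULL;	/* mapping begins at this address */
	uint64_t vm_pgoff = 1;			/* file page where the mapping starts */
	uint64_t start = vm_start + 0x1000;	/* msync() called one page into the VMA */

	/* Old formula: the absolute userspace address leaks into the file offset. */
	uint64_t old_fstart = start + (vm_pgoff << PAGE_SHIFT);

	/* New formula: only the offset within the VMA is added. */
	uint64_t new_fstart = (start - vm_start) + (vm_pgoff << PAGE_SHIFT);

	printf("old fstart = %#llx (far beyond any real file offset)\n",
	       (unsigned long long)old_fstart);
	printf("new fstart = %#llx (page 2 of the file, as expected)\n",
	       (unsigned long long)new_fstart);
	return 0;
}

With the old arithmetic the range handed to the sync path is derived from a userspace address rather than a position in the file, so MS_SYNC could flush the wrong part of the file or nothing at all.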
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 20d17f8266fe..0ea758b898fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -816,9 +816,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
 		set_page_count(p, 0);
 	} while (++p, --i);
 
-	set_page_refcounted(page);
 	set_pageblock_migratetype(page, MIGRATE_CMA);
-	__free_pages(page, pageblock_order);
+
+	if (pageblock_order >= MAX_ORDER) {
+		i = pageblock_nr_pages;
+		p = page;
+		do {
+			set_page_refcounted(p);
+			__free_pages(p, MAX_ORDER - 1);
+			p += MAX_ORDER_NR_PAGES;
+		} while (i -= MAX_ORDER_NR_PAGES);
+	} else {
+		set_page_refcounted(page);
+		__free_pages(page, pageblock_order);
+	}
+
 	adjust_managed_page_count(page, pageblock_nr_pages);
 }
 #endif
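
The new branch matters when the pageblock is larger than the biggest block the buddy allocator handles (e.g. with 64K pages and transparent hugepages, where pageblock_order can be 13 while MAX_ORDER is 11): a single __free_pages(page, pageblock_order) call would ask for an order the allocator cannot represent. A standalone sketch of just the chunking arithmetic follows; the constants are illustrative, not taken from the patch.

/* Standalone sketch of the MAX_ORDER-1 chunking used above; constants are illustrative. */
#include <stdio.h>

#define MAX_ORDER		11			/* largest buddy block is 2^(MAX_ORDER - 1) pages */
#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))
#define pageblock_order		13			/* pageblock bigger than the buddy limit */
#define pageblock_nr_pages	(1UL << pageblock_order)

int main(void)
{
	unsigned long pfn = 0;			/* pretend the pageblock starts at pfn 0 */
	unsigned long i = pageblock_nr_pages;
	unsigned long freed = 0;

	if (pageblock_order >= MAX_ORDER) {
		/* Mirror the do/while in the hunk: release the block one
		 * MAX_ORDER-1 sized chunk at a time. */
		do {
			printf("free pfn %5lu..%5lu (order %d)\n",
			       pfn, pfn + MAX_ORDER_NR_PAGES - 1, MAX_ORDER - 1);
			freed += MAX_ORDER_NR_PAGES;
			pfn += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		freed = pageblock_nr_pages;	/* a single order-pageblock_order free */
	}

	printf("freed %lu of %lu pages\n", freed, pageblock_nr_pages);
	return 0;
}

The loop covers the whole pageblock exactly: eight order-10 chunks of 1024 pages each for the 8192-page block in this sketch.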
diff --git a/mm/shmem.c b/mm/shmem.c
index 8f419cff9e34..1140f49b6ded 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1029,6 +1029,9 @@ repeat:
 		goto failed;
 	}
 
+	if (page && sgp == SGP_WRITE)
+		mark_page_accessed(page);
+
 	/* fallocated page? */
 	if (page && !PageUptodate(page)) {
 		if (sgp != SGP_READ)
@@ -1110,6 +1113,9 @@ repeat:
 		shmem_recalc_inode(inode);
 		spin_unlock(&info->lock);
 
+		if (sgp == SGP_WRITE)
+			mark_page_accessed(page);
+
 		delete_from_swap_cache(page);
 		set_page_dirty(page);
 		swap_free(swap);
@@ -1136,6 +1142,9 @@ repeat:
 
 		__SetPageSwapBacked(page);
 		__set_page_locked(page);
+		if (sgp == SGP_WRITE)
+			init_page_accessed(page);
+
 		error = mem_cgroup_charge_file(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
 		if (error)
@@ -1412,13 +1421,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata)
 {
-	int ret;
 	struct inode *inode = mapping->host;
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
-	if (ret == 0 && *pagep)
-		init_page_accessed(*pagep);
-	return ret;
+	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
 }
 
 static int
diff --git a/mm/slub.c b/mm/slub.c
index b2b047327d76..73004808537e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1881,7 +1881,7 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial > s->min_partial)
+	if (!new.inuse && n->nr_partial >= s->min_partial)
 		m = M_FREE;
 	else if (new.freelist) {
 		m = M_PARTIAL;
@@ -1992,7 +1992,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 				new.freelist, new.counters,
 				"unfreezing slab"));
 
-		if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
+		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
 			page->next = discard_page;
 			discard_page = page;
 		} else {
@@ -2620,7 +2620,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 	}
 
-	if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
 		goto slab_empty;
 
 	/*
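
All three slub.c hunks fix the same off-by-one against s->min_partial, the number of partial slabs a node is meant to keep cached: with the old strict comparison an empty slab was retained even when the node already held exactly min_partial slabs, leaving one slab more than intended. A standalone sketch of the boundary case, with counts invented for illustration:

/* Standalone sketch of the > vs >= boundary fixed above; the counts are made up. */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned long min_partial = 5;	/* slabs the node should keep around */
	unsigned long nr_partial = 5;	/* slabs already on the partial list */
	bool slab_is_empty = true;	/* the slab being released has no objects in use */

	/* Old test: the empty slab is kept, so the list grows to 6. */
	bool old_discard = slab_is_empty && nr_partial > min_partial;

	/* New test: the empty slab is discarded and the list stays at 5. */
	bool new_discard = slab_is_empty && nr_partial >= min_partial;

	printf("old test discards: %s -> node holds %lu partial slabs\n",
	       old_discard ? "yes" : "no", nr_partial + (old_discard ? 0 : 1));
	printf("new test discards: %s -> node holds %lu partial slabs\n",
	       new_discard ? "yes" : "no", nr_partial + (new_discard ? 0 : 1));
	return 0;
}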