Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c          1
-rw-r--r--  mm/memory-failure.c  13
-rw-r--r--  mm/memory.c           3
-rw-r--r--  mm/mempolicy.c        2
-rw-r--r--  mm/msync.c            3
-rw-r--r--  mm/page_alloc.c      16
-rw-r--r--  mm/rmap.c            10
-rw-r--r--  mm/shmem.c          117
-rw-r--r--  mm/slub.c             6
-rw-r--r--  mm/truncate.c        11
10 files changed, 117 insertions, 65 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2024bbd573d2..9221c02ed9e2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		} else {
 			if (cow)
 				huge_ptep_set_wrprotect(src, addr, src_pte);
+			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			page_dup_rmap(ptepage);
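Note on the hunk above: the source PTE is now re-read only after huge_ptep_set_wrprotect(), so the value that is later installed into the child's page table already carries the write protection that copy-on-write relies on. A minimal sketch of the intended ordering, assuming the usual set_huge_pte_at() call further down in copy_hugetlb_page_range() (not shown in this hunk):

	if (cow)
		huge_ptep_set_wrprotect(src, addr, src_pte);
	entry = huge_ptep_get(src_pte);	/* read after wrprotect: entry is read-only */
	ptepage = pte_page(entry);
	get_page(ptepage);
	page_dup_rmap(ptepage);
	set_huge_pte_at(dst, addr, dst_pte, entry);	/* child inherits the read-only PTE */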
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index cd8989c1027e..7211a73ba14d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page_to_pgoff(page);
 	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	mutex_lock(&mapping->i_mmap_mutex);
 	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
-		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		pgoff_t pgoff = page_to_pgoff(page);
 		struct task_struct *t = task_early_kill(tsk, force_early);
 
 		if (!t)
@@ -895,7 +895,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	struct page *hpage = *hpagep;
 	struct page *ppage;
 
-	if (PageReserved(p) || PageSlab(p))
+	if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
 		return SWAP_SUCCESS;
 
 	/*
@@ -1159,9 +1159,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 					action_result(pfn, "free buddy, 2nd try", DELAYED);
 				return 0;
 			}
-			action_result(pfn, "non LRU", IGNORED);
-			put_page(p);
-			return -EBUSY;
 		}
 	}
 
@@ -1194,6 +1191,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 		return 0;
 	}
 
+	if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
+		goto identify_page_state;
+
 	/*
 	 * For error on the tail page, we should set PG_hwpoison
 	 * on the head page to show that the hugepage is hwpoisoned
@@ -1243,6 +1243,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 		goto out;
 	}
 
+identify_page_state:
 	res = -EBUSY;
 	/*
 	 * The first check uses the current page flags which may not have any
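The open-coded pgoff arithmetic above (and in the mm/rmap.c hunks below) is replaced by page_to_pgoff(). That helper is not defined in this diff; it presumably lives in a shared header such as include/linux/pagemap.h, so that hugetlb pages, whose page->index counts huge pages rather than base pages, are scaled correctly. A rough sketch of what such a helper would look like, under that assumption:

	static inline pgoff_t page_to_pgoff(struct page *page)
	{
		if (unlikely(PageHeadHuge(page)))
			return page->index << compound_order(page);
		else
			return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	}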
diff --git a/mm/memory.c b/mm/memory.c
index d67fd9fcf1f2..7e8d8205b610 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * if page by the offset is not ready to be mapped (cold cache or
 	 * something).
 	 */
-	if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
+	if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
+	    fault_around_pages() > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 		do_fault_around(vma, address, pte, pgoff, flags);
 		if (!pte_same(*pte, orig_pte))
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eb58de19f815..8f5330d74f47 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2139,7 +2139,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
 	} else
 		*new = *old;
 
-	rcu_read_lock();
 	if (current_cpuset_is_being_rebound()) {
 		nodemask_t mems = cpuset_mems_allowed(current);
 		if (new->flags & MPOL_F_REBINDING)
@@ -2147,7 +2146,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
 		else
 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
 	}
-	rcu_read_unlock();
 	atomic_set(&new->refcnt, 1);
 	return new;
 }
diff --git a/mm/msync.c b/mm/msync.c
index a5c673669ca6..992a1673d488 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -78,7 +78,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
 			goto out_unlock;
 		}
 		file = vma->vm_file;
-		fstart = start + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+		fstart = (start - vma->vm_start) +
+			((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 		fend = fstart + (min(end, vma->vm_end) - start) - 1;
 		start = vma->vm_end;
 		if ((flags & MS_SYNC) && file &&
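The corrected fstart expression converts the sync start address into an offset within the VMA before adding the VMA's file offset. A worked example with made-up numbers: for vm_start = 0x7f0000001000 and vm_pgoff = 0x200 (file offset 0x200000), an msync() range starting at 0x7f0000004000 is 0x3000 into the VMA, so

	new: fstart = (0x7f0000004000 - 0x7f0000001000) + 0x200000 = 0x203000
	old: fstart =  0x7f0000004000                   + 0x200000 = 0x7f0000204000

i.e. the old formula derived a file offset from the virtual address itself, far beyond any plausible file size.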
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 20d17f8266fe..0ea758b898fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -816,9 +816,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
 		set_page_count(p, 0);
 	} while (++p, --i);
 
-	set_page_refcounted(page);
 	set_pageblock_migratetype(page, MIGRATE_CMA);
-	__free_pages(page, pageblock_order);
+
+	if (pageblock_order >= MAX_ORDER) {
+		i = pageblock_nr_pages;
+		p = page;
+		do {
+			set_page_refcounted(p);
+			__free_pages(p, MAX_ORDER - 1);
+			p += MAX_ORDER_NR_PAGES;
+		} while (i -= MAX_ORDER_NR_PAGES);
+	} else {
+		set_page_refcounted(page);
+		__free_pages(page, pageblock_order);
+	}
+
 	adjust_managed_page_count(page, pageblock_nr_pages);
 }
 #endif
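The new branch covers configurations where a pageblock is larger than the largest buddy order, in which case a single __free_pages(page, pageblock_order) call is not valid and the block has to be handed back in MAX_ORDER-1 sized chunks. With hypothetical values MAX_ORDER = 11 and pageblock_order = 13:

	pageblock_nr_pages  = 1 << 13        = 8192 pages
	MAX_ORDER_NR_PAGES  = 1 << (11 - 1)  = 1024 pages
	loop iterations     = 8192 / 1024    = 8 calls to __free_pages(p, MAX_ORDER - 1)

Each iteration sets the chunk's refcount before freeing it, matching what the old single call did for the whole block.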
diff --git a/mm/rmap.c b/mm/rmap.c
index b7e94ebbd09e..22a4a7699cdb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-	if (unlikely(is_vm_hugetlb_page(vma)))
-		pgoff = page->index << huge_page_order(page_hstate(page));
-
+	pgoff_t pgoff = page_to_pgoff(page);
 	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }
 
@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page->index << compound_order(page);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 8f419cff9e34..af68b15a8fc1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
-	int	mode;			/* FALLOC_FL mode currently operating */
+	wait_queue_head_t *waitq;	/* faults into hole wait for punch to end */
 	pgoff_t start;		/* start of range currently being fallocated */
 	pgoff_t next;		/* the next page offset to be fallocated */
 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		return;
 
 	index = start;
-	for ( ; ; ) {
+	while (index < end) {
 		cond_resched();
 
 		pvec.nr = find_get_entries(mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 				pvec.pages, indices);
 		if (!pvec.nr) {
-			if (index == start || unfalloc)
+			/* If all gone or hole-punch or unfalloc, we're done */
+			if (index == start || end != -1)
 				break;
+			/* But if truncating, restart to make sure all gone */
 			index = start;
 			continue;
 		}
-		if ((index == start || unfalloc) && indices[0] >= end) {
-			pagevec_remove_exceptionals(&pvec);
-			pagevec_release(&pvec);
-			break;
-		}
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			if (radix_tree_exceptional_entry(page)) {
 				if (unfalloc)
 					continue;
-				nr_swaps_freed += !shmem_free_swap(mapping,
-								index, page);
+				if (shmem_free_swap(mapping, index, page)) {
+					/* Swap was replaced by page: retry */
+					index--;
+					break;
+				}
+				nr_swaps_freed++;
 				continue;
 			}
 
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				if (page->mapping == mapping) {
 					VM_BUG_ON_PAGE(PageWriteback(page), page);
 					truncate_inode_page(mapping, page);
+				} else {
+					/* Page was replaced by swap: retry */
+					unlock_page(page);
+					index--;
+					break;
 				}
 			}
 			unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 		spin_lock(&inode->i_lock);
 		shmem_falloc = inode->i_private;
 		if (shmem_falloc &&
-		    !shmem_falloc->mode &&
+		    !shmem_falloc->waitq &&
 		    index >= shmem_falloc->start &&
 		    index < shmem_falloc->next)
 			shmem_falloc->nr_unswapped++;
@@ -1029,6 +1035,9 @@ repeat:
 		goto failed;
 	}
 
+	if (page && sgp == SGP_WRITE)
+		mark_page_accessed(page);
+
 	/* fallocated page? */
 	if (page && !PageUptodate(page)) {
 		if (sgp != SGP_READ)
@@ -1110,6 +1119,9 @@ repeat:
 		shmem_recalc_inode(inode);
 		spin_unlock(&info->lock);
 
+		if (sgp == SGP_WRITE)
+			mark_page_accessed(page);
+
 		delete_from_swap_cache(page);
 		set_page_dirty(page);
 		swap_free(swap);
@@ -1136,6 +1148,9 @@ repeat:
 
 		__SetPageSwapBacked(page);
 		__set_page_locked(page);
+		if (sgp == SGP_WRITE)
+			init_page_accessed(page);
+
 		error = mem_cgroup_charge_file(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
 		if (error)
@@ -1239,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Trinity finds that probing a hole which tmpfs is punching can
 	 * prevent the hole-punch from ever completing: which in turn
 	 * locks writers out with its hold on i_mutex.  So refrain from
-	 * faulting pages into the hole while it's being punched, and
-	 * wait on i_mutex to be released if vmf->flags permits.
+	 * faulting pages into the hole while it's being punched.  Although
+	 * shmem_undo_range() does remove the additions, it may be unable to
+	 * keep up, as each new page needs its own unmap_mapping_range() call,
+	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
+	 *
+	 * It does not matter if we sometimes reach this check just before the
+	 * hole-punch begins, so that one fault then races with the punch:
+	 * we just need to make racing faults a rare case.
+	 *
+	 * The implementation below would be much simpler if we just used a
+	 * standard mutex or completion: but we cannot take i_mutex in fault,
+	 * and bloating every shmem inode for this unlikely case would be sad.
 	 */
 	if (unlikely(inode->i_private)) {
 		struct shmem_falloc *shmem_falloc;
 
 		spin_lock(&inode->i_lock);
 		shmem_falloc = inode->i_private;
-		if (!shmem_falloc ||
-		    shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
-		    vmf->pgoff < shmem_falloc->start ||
-		    vmf->pgoff >= shmem_falloc->next)
-			shmem_falloc = NULL;
-		spin_unlock(&inode->i_lock);
-		/*
-		 * i_lock has protected us from taking shmem_falloc seriously
-		 * once return from shmem_fallocate() went back up that stack.
-		 * i_lock does not serialize with i_mutex at all, but it does
-		 * not matter if sometimes we wait unnecessarily, or sometimes
-		 * miss out on waiting: we just need to make those cases rare.
-		 */
-		if (shmem_falloc) {
+		if (shmem_falloc &&
+		    shmem_falloc->waitq &&
+		    vmf->pgoff >= shmem_falloc->start &&
+		    vmf->pgoff < shmem_falloc->next) {
+			wait_queue_head_t *shmem_falloc_waitq;
+			DEFINE_WAIT(shmem_fault_wait);
+
+			ret = VM_FAULT_NOPAGE;
 			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
 			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				/* It's polite to up mmap_sem if we can */
 				up_read(&vma->vm_mm->mmap_sem);
-				mutex_lock(&inode->i_mutex);
-				mutex_unlock(&inode->i_mutex);
-				return VM_FAULT_RETRY;
+				ret = VM_FAULT_RETRY;
 			}
-			/* cond_resched? Leave that to GUP or return to user */
-			return VM_FAULT_NOPAGE;
+
+			shmem_falloc_waitq = shmem_falloc->waitq;
+			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock(&inode->i_lock);
+			schedule();
+
+			/*
+			 * shmem_falloc_waitq points into the shmem_fallocate()
+			 * stack of the hole-punching task: shmem_falloc_waitq
+			 * is usually invalid by the time we reach here, but
+			 * finish_wait() does not dereference it in that case;
+			 * though i_lock needed lest racing with wake_up_all().
+			 */
+			spin_lock(&inode->i_lock);
+			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+			spin_unlock(&inode->i_lock);
+			return ret;
 		}
+		spin_unlock(&inode->i_lock);
 	}
 
 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1412,13 +1447,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata)
 {
-	int ret;
 	struct inode *inode = mapping->host;
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
-	if (ret == 0 && *pagep)
-		init_page_accessed(*pagep);
-	return ret;
+	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
 }
 
 static int
@@ -1769,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 
 	mutex_lock(&inode->i_mutex);
 
-	shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
 	if (mode & FALLOC_FL_PUNCH_HOLE) {
 		struct address_space *mapping = file->f_mapping;
 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
+		shmem_falloc.waitq = &shmem_falloc_waitq;
 		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
 		spin_lock(&inode->i_lock);
@@ -1787,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 					    1 + unmap_end - unmap_start, 0);
 		shmem_truncate_range(inode, offset, offset + len - 1);
 		/* No need to unmap again: hole-punching leaves COWed pages */
+
+		spin_lock(&inode->i_lock);
+		inode->i_private = NULL;
+		wake_up_all(&shmem_falloc_waitq);
+		spin_unlock(&inode->i_lock);
 		error = 0;
-		goto undone;
+		goto out;
 	}
 
 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1804,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 		goto out;
 	}
 
+	shmem_falloc.waitq = NULL;
 	shmem_falloc.start = start;
 	shmem_falloc.next = start;
 	shmem_falloc.nr_falloced = 0;
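Taken together, the shmem_fault() and shmem_fallocate() hunks above replace the old take-and-drop of i_mutex with a wait queue that lives on the hole-punching task's stack and is published through inode->i_private under i_lock. A stripped-down sketch of that pattern, with invented function names and most of the bookkeeping omitted (the real logic is in the hunks above):

	#include <linux/fs.h>
	#include <linux/wait.h>
	#include <linux/sched.h>

	/* Punching side: the waitq is only valid while this stack frame is alive. */
	static void punch_hole_sketch(struct inode *inode, struct shmem_falloc *falloc)
	{
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

		falloc->waitq = &waitq;
		spin_lock(&inode->i_lock);
		inode->i_private = falloc;		/* publish to would-be faulters */
		spin_unlock(&inode->i_lock);

		/* ... unmap_mapping_range() and shmem_truncate_range() ... */

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;		/* unpublish before the frame dies */
		wake_up_all(&waitq);			/* release any faulters still queued */
		spin_unlock(&inode->i_lock);
	}

	/* Faulting side: sleep until the punch completes, then let the fault retry. */
	static void wait_for_punch_sketch(struct inode *inode)
	{
		struct shmem_falloc *falloc;

		spin_lock(&inode->i_lock);
		falloc = inode->i_private;
		if (falloc && falloc->waitq) {		/* real code also checks the pgoff range */
			wait_queue_head_t *waitq = falloc->waitq;
			DEFINE_WAIT(wait);

			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);
			schedule();			/* woken by wake_up_all() above */

			spin_lock(&inode->i_lock);	/* lest we race with wake_up_all() */
			finish_wait(waitq, &wait);
		}
		spin_unlock(&inode->i_lock);
	}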
diff --git a/mm/slub.c b/mm/slub.c
index b2b047327d76..73004808537e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1881,7 +1881,7 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial > s->min_partial)
+	if (!new.inuse && n->nr_partial >= s->min_partial)
 		m = M_FREE;
 	else if (new.freelist) {
 		m = M_PARTIAL;
@@ -1992,7 +1992,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 				new.freelist, new.counters,
 				"unfreezing slab"));
 
-		if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
+		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
 			page->next = discard_page;
 			discard_page = page;
 		} else {
@@ -2620,7 +2620,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 	}
 
-	if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
 		goto slab_empty;
 
 	/*
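All three slub.c hunks fix the same off-by-one: s->min_partial is the number of partial slabs a node is meant to retain, so an emptied slab should be discarded once nr_partial has already reached that limit. For example, with min_partial = 5 and nr_partial = 5, the old test (5 > 5) kept the empty slab on the partial list, leaving six slabs where five were configured; the new test (5 >= 5) frees it and keeps exactly five.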
diff --git a/mm/truncate.c b/mm/truncate.c
index 6a78c814bebf..eda247307164 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	for ( ; ; ) {
 		cond_resched();
 		if (!pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE),
-			indices)) {
+			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+			/* If all gone from start onwards, we're done */
 			if (index == start)
 				break;
+			/* Otherwise restart to make sure all gone */
 			index = start;
 			continue;
 		}
 		if (index == start && indices[0] >= end) {
+			/* All gone out of hole to be punched, we're done */
 			pagevec_remove_exceptionals(&pvec);
 			pagevec_release(&pvec);
 			break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
 			/* We rely upon deletion not changing page->index */
 			index = indices[i];
-			if (index >= end)
+			if (index >= end) {
+				/* Restart punch to make sure all gone */
+				index = start - 1;
 				break;
+			}
 
 			if (radix_tree_exceptional_entry(page)) {
 				clear_exceptional_entry(mapping, index, page);