author    Ingo Molnar <mingo@kernel.org>  2013-08-16 11:40:23 -0400
committer Ingo Molnar <mingo@kernel.org>  2013-08-16 11:40:23 -0400
commit    d3ec3a1fd08f705d1b319db4113590944bcde749 (patch)
tree      598a0da59e027b455f11beab24a96ef1e56fbbcb /mm
parent    8f898fbbe5ee5e20a77c4074472a1fd088dc47d1 (diff)
parent    d4e4ab86bcba5a72779c43dc1459f71fea3d89c8 (diff)
Merge tag 'v3.11-rc5' into sched/core
Merge Linux 3.11-rc5, to pick up the latest fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c    4
-rw-r--r--  mm/memcontrol.c     1
-rw-r--r--  mm/mempolicy.c      6
-rw-r--r--  mm/mmap.c           2
-rw-r--r--  mm/shmem.c          3
-rw-r--r--  mm/slub.c           3
-rw-r--r--  mm/swap.c          29
-rw-r--r--  mm/vmpressure.c    28
-rw-r--r--  mm/zbud.c           2
9 files changed, 39 insertions(+), 39 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243e710c6039..a92012a71702 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1620,7 +1620,9 @@ static void __split_huge_page_refcount(struct page *page,
 			 ((1L << PG_referenced) |
 			  (1L << PG_swapbacked) |
 			  (1L << PG_mlocked) |
-			  (1L << PG_uptodate)));
+			  (1L << PG_uptodate) |
+			  (1L << PG_active) |
+			  (1L << PG_unevictable)));
 	page_tail->flags |= (1L << PG_dirty);
 
 	/* clear PageTail before overwriting first_page */
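This hunk, together with the mm/swap.c changes further down, lets tail pages inherit the head page's active/unevictable state during a huge-page split, so that page_lru() computes the right LRU list for each tail. A minimal userspace sketch of the mask-and-copy pattern; the F_* names and the main() harness are invented for illustration and are not kernel API:

#include <stdio.h>

enum {
	F_REFERENCED  = 1L << 0,
	F_SWAPBACKED  = 1L << 1,
	F_MLOCKED     = 1L << 2,
	F_UPTODATE    = 1L << 3,
	F_ACTIVE      = 1L << 4,	/* newly whitelisted above */
	F_UNEVICTABLE = 1L << 5,	/* newly whitelisted above */
	F_DIRTY       = 1L << 6,
	F_LOCKED      = 1L << 7,	/* deliberately never inherited */
};

int main(void)
{
	long head = F_UPTODATE | F_ACTIVE | F_DIRTY | F_LOCKED;
	long inherit = F_REFERENCED | F_SWAPBACKED | F_MLOCKED |
		       F_UPTODATE | F_ACTIVE | F_UNEVICTABLE;
	long tail = head & inherit;	/* copy only whitelisted flags */

	tail |= F_DIRTY;		/* tails are marked dirty, as in the hunk */
	printf("head %#lx -> tail %#lx (F_LOCKED dropped)\n", head, tail);
	return 0;
}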
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 00a7a664b9c1..c290a1cf3862 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6335,6 +6335,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
 	mem_cgroup_invalidate_reclaim_iterators(memcg);
 	mem_cgroup_reparent_charges(memcg);
 	mem_cgroup_destroy_all_caches(memcg);
+	vmpressure_cleanup(&memcg->vmpressure);
 }
 
 static void mem_cgroup_css_free(struct cgroup *cont)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 74310017296e..4baf12e534d1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
-			continue;
+			if (mpol_equal(vma_policy(vma), new_pol))
+				continue;
+			/* vma_merge() joined vma && vma->next, case 8 */
+			goto replace;
 		}
 		if (vma->vm_start != vmstart) {
 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 			if (err)
 				goto out;
 		}
+ replace:
 		err = vma_replace_policy(vma, new_pol);
 		if (err)
 			goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
index fbad7b091090..1edbaa3136c3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -865,7 +865,7 @@ again: remove_next = 1 + (end > next->vm_end);
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
-		vma_set_policy(vma, vma_policy(next));
+		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
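The two vmas were merged precisely because their policies were equal, so the old vma_set_policy() call only re-installed the same policy while the reference held by the vma being freed was never dropped. A small userspace sketch of the refcount rule the fix restores; the struct and names are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct mempolicy {
	int refcnt;		/* stand-in for the kernel's shared policy */
};

static void mpol_put(struct mempolicy *pol)
{
	if (pol && --pol->refcnt == 0) {
		printf("last reference dropped, policy freed\n");
		free(pol);
	}
}

int main(void)
{
	struct mempolicy *pol = malloc(sizeof(*pol));

	pol->refcnt = 2;	/* shared by the surviving vma and by next */

	/* next is freed after the merge: drop only its reference, leaving
	 * the surviving vma's policy (and its reference) untouched */
	mpol_put(pol);
	printf("refcnt now %d, held by the surviving vma\n", pol->refcnt);

	mpol_put(pol);		/* the surviving vma releases it later */
	return 0;
}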
diff --git a/mm/shmem.c b/mm/shmem.c
index a87990cf9f94..8335dbd3fc35 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1798,7 +1798,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 		}
 	}
 
-	offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
+	if (offset >= 0)
+		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
 	mutex_unlock(&inode->i_mutex);
 	return offset;
 }
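The guard matters because in-kernel file offsets double as error codes: a SEEK_DATA/SEEK_HOLE probe can legitimately produce a negative errno value, and feeding it to vfs_setpos() would clobber the error. A compact userspace sketch of the convention, with setpos() as an invented stand-in for vfs_setpos():

#include <stdio.h>
#include <errno.h>

/* invented stand-in for vfs_setpos(): validates and stores an offset */
static long setpos(long *f_pos, long offset, long maxsize)
{
	if (offset > maxsize)
		return -EINVAL;
	*f_pos = offset;
	return offset;
}

int main(void)
{
	long f_pos = 0;
	long offset = -ENXIO;	/* e.g. SEEK_DATA found no data past EOF */

	if (offset >= 0)	/* the guard added by the hunk */
		offset = setpos(&f_pos, offset, 1L << 40);

	printf("returned %ld, f_pos still %ld\n", offset, f_pos);
	return 0;
}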
diff --git a/mm/slub.c b/mm/slub.c
index 2b02d666bf63..e3ba1f2cf60c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 	int pages;
 	int pobjects;
 
-	if (!s->cpu_partial)
-		return;
-
 	do {
 		pages = 0;
 		pobjects = 0;
diff --git a/mm/swap.c b/mm/swap.c
index 4a1d0d2c52fa..62b78a6e224f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,12 +512,7 @@ EXPORT_SYMBOL(__lru_cache_add);
  */
 void lru_cache_add(struct page *page)
 {
-	if (PageActive(page)) {
-		VM_BUG_ON(PageUnevictable(page));
-	} else if (PageUnevictable(page)) {
-		VM_BUG_ON(PageActive(page));
-	}
-
+	VM_BUG_ON(PageActive(page) && PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 	__lru_cache_add(page);
 }
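The collapsed VM_BUG_ON is equivalent to the old two-branch version: inside the else-if arm PageActive is already known false, so that second VM_BUG_ON could never fire, and the only state either form rejects is active plus unevictable. A brute-force check over the four states, in plain C outside the kernel:

#include <stdio.h>

int main(void)
{
	for (int active = 0; active <= 1; active++)
		for (int unevictable = 0; unevictable <= 1; unevictable++) {
			/* old form: the second branch re-tests 'active'
			 * on the !active path, so it is dead code */
			int old_bug = active ? unevictable
					     : (unevictable ? active : 0);
			int new_bug = active && unevictable;

			printf("active=%d unevictable=%d old=%d new=%d\n",
			       active, unevictable, old_bug, new_bug);
		}
	return 0;
}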
@@ -539,6 +534,7 @@ void add_page_to_unevictable_list(struct page *page)
 
 	spin_lock_irq(&zone->lru_lock);
 	lruvec = mem_cgroup_page_lruvec(page, zone);
+	ClearPageActive(page);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
 	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
@@ -774,8 +770,6 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct page *page, struct page *page_tail,
 		       struct lruvec *lruvec, struct list_head *list)
 {
-	int uninitialized_var(active);
-	enum lru_list lru;
 	const int file = 0;
 
 	VM_BUG_ON(!PageHead(page));
@@ -787,20 +781,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	if (!list)
 		SetPageLRU(page_tail);
 
-	if (page_evictable(page_tail)) {
-		if (PageActive(page)) {
-			SetPageActive(page_tail);
-			active = 1;
-			lru = LRU_ACTIVE_ANON;
-		} else {
-			active = 0;
-			lru = LRU_INACTIVE_ANON;
-		}
-	} else {
-		SetPageUnevictable(page_tail);
-		lru = LRU_UNEVICTABLE;
-	}
-
 	if (likely(PageLRU(page)))
 		list_add_tail(&page_tail->lru, &page->lru);
 	else if (list) {
@@ -816,13 +796,13 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 		 * Use the standard add function to put page_tail on the list,
 		 * but then correct its position so they all end up in order.
 		 */
-		add_page_to_lru_list(page_tail, lruvec, lru);
+		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
 
 	if (!PageUnevictable(page))
-		update_page_reclaim_stat(lruvec, file, active);
+		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -833,7 +813,6 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 	int active = PageActive(page);
 	enum lru_list lru = page_lru(page);
 
-	VM_BUG_ON(PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 
 	SetPageLRU(page);
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c8..0c1e37d829fa 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -180,12 +180,12 @@ static void vmpressure_work_fn(struct work_struct *work)
 	if (!vmpr->scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	scanned = vmpr->scanned;
 	reclaimed = vmpr->reclaimed;
 	vmpr->scanned = 0;
 	vmpr->reclaimed = 0;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
 	do {
 		if (vmpressure_event(vmpr, scanned, reclaimed))
@@ -240,13 +240,13 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
 	if (!scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	vmpr->scanned += scanned;
 	vmpr->reclaimed += reclaimed;
 	scanned = vmpr->scanned;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
-	if (scanned < vmpressure_win || work_pending(&vmpr->work))
+	if (scanned < vmpressure_win)
 		return;
 	schedule_work(&vmpr->work);
 }
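Two independent changes meet here. The sr_lock critical sections only touch a pair of counters and never sleep, which makes a spinlock the lighter fit than a mutex; and the open-coded work_pending() test appears redundant, since schedule_work() already tests the pending bit atomically. A userspace sketch of the accumulate-then-kick pattern, with invented names and a printf standing in for schedule_work():

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t sr_lock;
static unsigned long total_scanned, total_reclaimed;
static const unsigned long win = 512;	/* stand-in for vmpressure_win */

static void pressure(unsigned long scanned, unsigned long reclaimed)
{
	/* tiny, non-sleeping critical section: counters only */
	pthread_spin_lock(&sr_lock);
	total_scanned += scanned;
	total_reclaimed += reclaimed;
	scanned = total_scanned;
	pthread_spin_unlock(&sr_lock);

	if (scanned < win)
		return;
	/* the kernel calls schedule_work() here, itself a no-op for
	 * already-pending work, so no separate work_pending() check */
	printf("kick worker: scanned=%lu\n", scanned);
}

int main(void)
{
	pthread_spin_init(&sr_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < 4; i++)
		pressure(200, 50);
	pthread_spin_destroy(&sr_lock);
	return 0;
}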
@@ -367,8 +367,24 @@ void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
  */
 void vmpressure_init(struct vmpressure *vmpr)
 {
-	mutex_init(&vmpr->sr_lock);
+	spin_lock_init(&vmpr->sr_lock);
 	mutex_init(&vmpr->events_lock);
 	INIT_LIST_HEAD(&vmpr->events);
 	INIT_WORK(&vmpr->work, vmpressure_work_fn);
 }
+
+/**
+ * vmpressure_cleanup() - shuts down vmpressure control structure
+ * @vmpr:	Structure to be cleaned up
+ *
+ * This function should be called before the structure in which it is
+ * embedded is cleaned up.
+ */
+void vmpressure_cleanup(struct vmpressure *vmpr)
+{
+	/*
+	 * Make sure there is no pending work before eventfd infrastructure
+	 * goes away.
+	 */
+	flush_work(&vmpr->work);
+}
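The new helper pairs with the mm/memcontrol.c hunk above: mem_cgroup_css_offline() now flushes any queued vmpressure work before the memcg it is embedded in goes away. The general flush-before-free rule, sketched in userspace with a pthread join standing in for flush_work():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct vmpr {			/* stand-in for struct vmpressure */
	pthread_t worker;
	int queued;
};

static void *work_fn(void *arg)
{
	struct vmpr *v = arg;

	usleep(1000);		/* pretend to deliver a pressure event */
	printf("work ran against %p\n", (void *)v);
	return NULL;
}

static void cleanup(struct vmpr *v)
{
	if (v->queued)		/* analogue of flush_work(): wait it out */
		pthread_join(v->worker, NULL);
}

int main(void)
{
	struct vmpr *v = calloc(1, sizeof(*v));

	v->queued = !pthread_create(&v->worker, NULL, work_fn, v);

	cleanup(v);	/* without this, free() below races with work_fn() */
	free(v);
	return 0;
}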
diff --git a/mm/zbud.c b/mm/zbud.c
index 9bb4710e3589..ad1e781284fd 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -257,7 +257,7 @@ int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
 
 	if (size <= 0 || gfp & __GFP_HIGHMEM)
 		return -EINVAL;
-	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED)
+	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
 		return -ENOSPC;
 	chunks = size_to_chunks(size);
 	spin_lock(&pool->lock);
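zbud rounds every request up to whole CHUNK_SIZE units and reserves a chunk-aligned header at the front of each page, so the admissible maximum must leave headroom for both; with the old bound a maximal request rounded up to consume every last chunk of the page. A small arithmetic sketch with illustrative 4 KiB/64-byte constants hardcoded (the kernel derives them from PAGE_SHIFT, and its size_to_chunks() uses shifts rather than this simplified division):

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define CHUNK_SIZE		64UL
#define ZHDR_SIZE_ALIGNED	CHUNK_SIZE	/* header, chunk-aligned */

static unsigned long size_to_chunks(unsigned long size)
{
	return (size + CHUNK_SIZE - 1) / CHUNK_SIZE;	/* round up */
}

int main(void)
{
	unsigned long per_page = PAGE_SIZE / CHUNK_SIZE;
	unsigned long old_max = PAGE_SIZE - ZHDR_SIZE_ALIGNED;
	unsigned long new_max = old_max - CHUNK_SIZE;

	printf("chunks per page: %lu (1 reserved for the header)\n", per_page);
	printf("old bound %lu -> %lu chunks + header = %lu (no slack)\n",
	       old_max, size_to_chunks(old_max), size_to_chunks(old_max) + 1);
	printf("new bound %lu -> %lu chunks + header = %lu\n",
	       new_max, size_to_chunks(new_max), size_to_chunks(new_max) + 1);
	return 0;
}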