author    Al Viro <viro@zeniv.linux.org.uk>  2014-06-12 00:27:11 -0400
committer Al Viro <viro@zeniv.linux.org.uk>  2014-06-12 00:28:09 -0400
commit    9c1d5284c79fea050f115eadeec1dd1758e5c630 (patch)
tree      4d16fd5aad7ff4931e985c0128c5747f23561f8a /mm
parent    5f073850602084fbcbb987948ff3e70ae273f7d2 (diff)
parent    9f12600fe425bc28f0ccba034a77783c09c15af4 (diff)
Merge commit '9f12600fe425bc28f0ccba034a77783c09c15af4' into for-linus
Backmerge of dcache.c changes from mainline.  It's that, or complete
rebase...

Conflicts:
	fs/splice.c

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig          | 15
-rw-r--r--  mm/filemap.c        |  6
-rw-r--r--  mm/kmemleak.c       |  4
-rw-r--r--  mm/madvise.c        |  2
-rw-r--r--  mm/memcontrol.c     | 27
-rw-r--r--  mm/memory-failure.c | 17
-rw-r--r--  mm/mremap.c         |  9
-rw-r--r--  mm/percpu.c         |  2
8 files changed, 55 insertions(+), 27 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index ebe5880c29d6..1b5a95f0fa01 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -581,3 +581,18 @@ config PGTABLE_MAPPING
 
 config GENERIC_EARLY_IOREMAP
 	bool
+
+config MAX_STACK_SIZE_MB
+	int "Maximum user stack size for 32-bit processes (MB)"
+	default 80
+	range 8 256 if METAG
+	range 8 2048
+	depends on STACK_GROWSUP && (!64BIT || COMPAT)
+	help
+	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
+	  user processes when the stack grows upwards (currently only on parisc
+	  and metag arch).  The stack will be located at the highest memory
+	  address minus the given value, unless the RLIMIT_STACK hard limit is
+	  changed to a smaller value in which case that is used.
+
+	  A sane initial value is 80 MB.
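The help text fully determines the placement arithmetic. As a rough illustration only (function and parameter names are hypothetical, not the kernel's), the base of a grows-up stack could be derived like this:

#include <sys/resource.h>

/*
 * Sketch of the placement rule the help text describes; names are
 * illustrative, not the kernel's.
 */
static unsigned long stack_base(unsigned long highest_addr,
				unsigned long max_stack_mb,
				rlim_t stack_hard_limit)
{
	unsigned long gap = max_stack_mb << 20;		/* MB to bytes */

	/* A smaller RLIMIT_STACK hard limit overrides the config value. */
	if (stack_hard_limit != RLIM_INFINITY && stack_hard_limit < gap)
		gap = stack_hard_limit;

	return highest_addr - gap;
}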
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f724e3cdf24..7499ef19f1c1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
 {
 	int ret = 0;
 	/* Check for outstanding write errors */
-	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+	if (test_bit(AS_ENOSPC, &mapping->flags) &&
+	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 		ret = -ENOSPC;
-	if (test_and_clear_bit(AS_EIO, &mapping->flags))
+	if (test_bit(AS_EIO, &mapping->flags) &&
+	    test_and_clear_bit(AS_EIO, &mapping->flags))
 		ret = -EIO;
 	return ret;
 }
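This is the classic check-before-atomic optimization: test_and_clear_bit() is an atomic read-modify-write that dirties the flags cacheline even when the bit is already clear, so a plain test_bit() filters the common error-free case first. A minimal userspace sketch of the same pattern, with GCC atomic builtins standing in for the kernel bitops:

/*
 * Userspace sketch of the test-before-test_and_clear pattern
 * (GCC builtins stand in for the kernel's bitops).
 */
static int lazy_test_and_clear(unsigned long *word, unsigned long bit)
{
	unsigned long mask = 1UL << bit;

	/* Plain read: in the common "bit clear" case no atomic RMW is
	 * issued and the cacheline stays shared. */
	if (!(__atomic_load_n(word, __ATOMIC_RELAXED) & mask))
		return 0;

	/* Bit looked set: now do the real atomic clear. */
	return !!(__atomic_fetch_and(word, ~mask, __ATOMIC_SEQ_CST) & mask);
}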
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 91d67eaee050..8d2fcdfeff7f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1775,10 +1775,9 @@ void __init kmemleak_init(void)
 	int i;
 	unsigned long flags;
 
-	kmemleak_early_log = 0;
-
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 	if (!kmemleak_skip_disable) {
+		kmemleak_early_log = 0;
 		kmemleak_disable();
 		return;
 	}
@@ -1796,6 +1795,7 @@ void __init kmemleak_init(void)
 
 	/* the kernel is still in UP mode, so disabling the IRQs is enough */
 	local_irq_save(flags);
+	kmemleak_early_log = 0;
 	if (kmemleak_error) {
 		local_irq_restore(flags);
 		return;
diff --git a/mm/madvise.c b/mm/madvise.c
index 539eeb96b323..a402f8fdc68e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 	for (; start < end; start += PAGE_SIZE) {
 		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
-		page = find_get_page(mapping, index);
+		page = find_get_entry(mapping, index);
 		if (!radix_tree_exceptional_entry(page)) {
 			if (page)
 				page_cache_release(page);
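Unlike find_get_page(), find_get_entry() can also return "exceptional" radix-tree entries (non-page values such as shadow entries), which the caller now filters with radix_tree_exceptional_entry(). The convention behind that test is low-bit pointer tagging; a sketch with illustrative names, using the bit value of the kernel's RADIX_TREE_EXCEPTIONAL_ENTRY:

#include <stdint.h>
#include <stdbool.h>

/* Sketch of the tagged-pointer test behind
 * radix_tree_exceptional_entry(): real page pointers are at least
 * 4-byte aligned, so bit 1 can mark a non-page value. */
#define EXCEPTIONAL_ENTRY	2UL

static bool entry_is_exceptional(void *entry)
{
	return ((uintptr_t)entry & EXCEPTIONAL_ENTRY) != 0;
}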
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c47dffdcb246..5177c6d4a2dd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 
 	rcu_read_lock();
 	do {
-		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-		if (unlikely(!memcg))
+		/*
+		 * Page cache insertions can happen without an
+		 * actual mm context, e.g. during disk probing
+		 * on boot, loopback IO, acct() writes etc.
+		 */
+		if (unlikely(!mm))
 			memcg = root_mem_cgroup;
+		else {
+			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+			if (unlikely(!memcg))
+				memcg = root_mem_cgroup;
+		}
 	} while (!css_tryget(&memcg->css));
 	rcu_read_unlock();
 	return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
 		return 0;
 	}
 
-	/*
-	 * Page cache insertions can happen without an actual mm
-	 * context, e.g. during disk probing on boot.
-	 */
-	if (unlikely(!mm))
-		memcg = root_mem_cgroup;
-	else {
-		memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
-		if (!memcg)
-			return -ENOMEM;
-	}
+	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+	if (!memcg)
+		return -ENOMEM;
 	__mem_cgroup_commit_charge(memcg, page, 1, type, false);
 	return 0;
 }
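The first hunk pushes the !mm fallback into get_mem_cgroup_from_mm() itself, so the lookup stays inside the usual RCU retry loop: css_tryget() fails once the css refcount has dropped to zero, in which case the lookup is simply repeated. A userspace sketch of that lookup-then-tryget pattern (illustrative types; the kernel pairs it with RCU and css refcounting):

#include <stdatomic.h>
#include <stdbool.h>

struct obj { _Atomic int refs; };

/* Fails once refs has dropped to zero, i.e. the object is dying. */
static bool obj_tryget(struct obj *o)
{
	int r = atomic_load(&o->refs);

	while (r > 0) {
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return true;
	}
	return false;
}

/* Caller pattern, mirroring get_mem_cgroup_from_mm():
 *
 *	rcu_read_lock();
 *	do {
 *		o = lookup();        // may return a dying object
 *	} while (!obj_tryget(o));
 *	rcu_read_unlock();
 */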
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 35ef28acf137..9ccef39a9de2 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 		return 0;
 	} else if (PageHuge(hpage)) {
 		/*
-		 * Check "just unpoisoned", "filter hit", and
-		 * "race with other subpage."
+		 * Check "filter hit" and "race with other subpage."
 		 */
 		lock_page(hpage);
-		if (!PageHWPoison(hpage)
-		    || (hwpoison_filter(p) && TestClearPageHWPoison(p))
-		    || (p != hpage && TestSetPageHWPoison(hpage))) {
-			atomic_long_sub(nr_pages, &num_poisoned_pages);
-			return 0;
+		if (PageHWPoison(hpage)) {
+			if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+			    || (p != hpage && TestSetPageHWPoison(hpage))) {
+				atomic_long_sub(nr_pages, &num_poisoned_pages);
+				unlock_page(hpage);
+				return 0;
+			}
 		}
 		set_page_hwpoison_huge_page(hpage);
 		res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 */
 	if (!PageHWPoison(p)) {
 		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+		atomic_long_sub(nr_pages, &num_poisoned_pages);
+		put_page(hpage);
 		res = 0;
 		goto out;
 	}
diff --git a/mm/mremap.c b/mm/mremap.c
index 0843feb66f3d..05f1180e9f21 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			break;
 		if (pmd_trans_huge(*old_pmd)) {
 			int err = 0;
-			if (extent == HPAGE_PMD_SIZE)
+			if (extent == HPAGE_PMD_SIZE) {
+				VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+				/* See comment in move_ptes() */
+				if (need_rmap_locks)
+					anon_vma_lock_write(vma->anon_vma);
 				err = move_huge_pmd(vma, new_vma, old_addr,
 						    new_addr, old_end,
 						    old_pmd, new_pmd);
+				if (need_rmap_locks)
+					anon_vma_unlock_write(vma->anon_vma);
+			}
 			if (err > 0) {
 				need_flush = true;
 				continue;
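The new locking mirrors move_ptes(): when the move can race with rmap walks (need_rmap_locks), the anon_vma lock is held across the huge-PMD move. The shape of the fix is plain conditional lock bracketing; a sketch with a pthread rwlock standing in for anon_vma_lock_write():

#include <pthread.h>

/* Sketch of the conditional bracketing added above; the rwlock
 * stands in for the kernel's anon_vma lock. */
static int move_one_region(pthread_rwlock_t *rmap_lock, int need_rmap_locks)
{
	int err = 0;

	if (need_rmap_locks)
		pthread_rwlock_wrlock(rmap_lock);

	/* ... do the move while rmap walkers are held off ... */

	if (need_rmap_locks)
		pthread_rwlock_unlock(rmap_lock);
	return err;
}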
diff --git a/mm/percpu.c b/mm/percpu.c
index 63e24fb4387b..2ddf9a990dbd 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -610,7 +610,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
 						sizeof(chunk->map[0]));
 	if (!chunk->map) {
-		kfree(chunk);
+		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
 		return NULL;
 	}
 
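kfree() was the wrong free routine here: pcpu_mem_zalloc() falls back to vzalloc() for large sizes, so the memory must be returned through the matching pcpu_mem_free(), which dispatches on size the same way. A userspace sketch of such a size-dispatching allocator pair (malloc/mmap stand in for kzalloc/vzalloc; the threshold is illustrative):

#include <stddef.h>
#include <stdlib.h>
#include <sys/mman.h>

#define SMALL_LIMIT	4096UL	/* illustrative; the kernel uses PAGE_SIZE */

static void *mem_zalloc(size_t size)
{
	if (size <= SMALL_LIMIT)
		return calloc(1, size);

	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

/* Frees must go through the matching routine, since the backing
 * allocator depends on the size -- exactly why kfree(chunk) was
 * wrong above. */
static void mem_free(void *ptr, size_t size)
{
	if (!ptr)
		return;
	if (size <= SMALL_LIMIT)
		free(ptr);
	else
		munmap(ptr, size);
}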