Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c     1
-rw-r--r--  mm/memcontrol.c  6
-rw-r--r--  mm/nobootmem.c   3
-rw-r--r--  mm/page_alloc.c  2
-rw-r--r--  mm/percpu.c     22
5 files changed, 25 insertions, 9 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5a16423a512c..ae8f708e3d75 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2498,7 +2498,6 @@ retry_avoidcopy:
 		if (outside_reserve) {
 			BUG_ON(huge_pte_none(pte));
 			if (unmap_ref_private(mm, vma, old_page, address)) {
-				BUG_ON(page_count(old_page) != 1);
 				BUG_ON(huge_pte_none(pte));
 				spin_lock(&mm->page_table_lock);
 				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
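Note: the dropped assertion is the only change here. Even when unmap_ref_private() has removed every child mapping, another CPU can presumably still hold a transient reference to old_page (a racing get_user_pages() or page migration, for instance), so page_count(old_page) == 1 is too strong an invariant to BUG_ON; the huge_pte_none() check that the retry path actually depends on stays.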
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 31ab9c3f0178..b659260c56ad 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4507,6 +4507,12 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 swap_buffers:
 	/* Swap primary and spare array */
 	thresholds->spare = thresholds->primary;
+	/* If all events are unregistered, free the spare array */
+	if (!new) {
+		kfree(thresholds->spare);
+		thresholds->spare = NULL;
+	}
+
 	rcu_assign_pointer(thresholds->primary, new);
 
 	/* To be sure that nobody uses thresholds */
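Note: the threshold arrays are double-buffered; readers walk thresholds->primary under RCU while writers rebuild into the spare. When the last event is unregistered, new is NULL, and before this change the retired primary array was parked in thresholds->spare with nothing left to ever reuse or free it, leaking it until the cgroup was destroyed. A minimal sketch of the pattern (hypothetical names, not the memcg code; conservatively, the retired array is freed only after synchronize_rcu() has drained its readers):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct thr_ary { unsigned int size; /* + entries[] */ };

	struct thr_set {
		struct thr_ary __rcu *primary;	/* readers: rcu_dereference() */
		struct thr_ary *spare;		/* writer-owned scratch buffer */
	};

	/* Publish 'new' as the live array; NULL means no events remain. */
	static void thr_set_publish(struct thr_set *t, struct thr_ary *new)
	{
		t->spare = rcu_dereference_protected(t->primary, 1);
		rcu_assign_pointer(t->primary, new);
		synchronize_rcu();	/* no reader still sees the old array */

		if (!new) {		/* nothing left to reuse the spare for: */
			kfree(t->spare);	/* this kfree is the leak fix */
			t->spare = NULL;
		}
	}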
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index e53bb8a256b1..1983fb1c7026 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -82,8 +82,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 
 static void __init __free_pages_memory(unsigned long start, unsigned long end)
 {
-	int i;
-	unsigned long start_aligned, end_aligned;
+	unsigned long i, start_aligned, end_aligned;
 	int order = ilog2(BITS_PER_LONG);
 
 	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
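Note: this is a correctness fix, not a cleanup. The loop bounds come from start and end, which are PFN-scale unsigned long values; with 4 KiB pages, PFNs stop fitting in an int once a machine has more than 8 TiB of memory, and stuffing one into an int truncates it and then sign-extends it back. A sketch of the hazard, assuming 4 KiB pages:

	#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

	unsigned long start = (8ULL << 40) >> PAGE_SHIFT; /* PFN 0x80000000 */
	int i = start;		/* truncates to a negative value            */
	unsigned long pfn = i;	/* sign-extends: 0xffffffff80000000, bogus  */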
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a712fb9e04ce..918330f71dba 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5203,7 +5203,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	int ret;
 
 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (!write || (ret == -EINVAL))
+	if (!write || (ret < 0))
 		return ret;
 	for_each_populated_zone(zone) {
 		for_each_possible_cpu(cpu) {
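Note: proc_dointvec_minmax() returns 0 on success and a negative errno on failure, and -EINVAL is only one of the possibilities (a bad user buffer yields -EFAULT, for example). Testing for -EINVAL alone let other failures fall through to the update loop with an unvalidated value; ret < 0 catches them all. The usual shape of such a handler, sketched with a hypothetical name:

	int my_fraction_handler(ctl_table *table, int write, void __user *buffer,
				size_t *length, loff_t *ppos)
	{
		int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

		if (!write || ret < 0)	/* plain read, or any parse error */
			return ret;

		/* ... only here is it safe to apply the new value ... */
		return 0;
	}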
diff --git a/mm/percpu.c b/mm/percpu.c
index f47af9123af7..bb4be7435ce3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1132,20 +1132,20 @@ static void pcpu_dump_alloc_info(const char *lvl,
 		for (alloc_end += gi->nr_units / upa;
 		     alloc < alloc_end; alloc++) {
 			if (!(alloc % apl)) {
-				printk("\n");
+				printk(KERN_CONT "\n");
 				printk("%spcpu-alloc: ", lvl);
 			}
-			printk("[%0*d] ", group_width, group);
+			printk(KERN_CONT "[%0*d] ", group_width, group);
 
 			for (unit_end += upa; unit < unit_end; unit++)
 				if (gi->cpu_map[unit] != NR_CPUS)
-					printk("%0*d ", cpu_width,
+					printk(KERN_CONT "%0*d ", cpu_width,
 					       gi->cpu_map[unit]);
 				else
-					printk("%s ", empty_str);
+					printk(KERN_CONT "%s ", empty_str);
 		}
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 
 /**
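Note: a printk() without a level prefix starts a new log record at the default loglevel, so a line assembled from several calls can be broken apart (and each fragment re-leveled) by the logging core. KERN_CONT explicitly marks a fragment as the continuation of the previous record. Roughly:

	printk(KERN_INFO "pcpu-alloc: ");	/* opens a new record            */
	printk(KERN_CONT "[%02d] ", 0);		/* appended to the same line     */
	printk(KERN_CONT "\n");			/* newline terminates the record */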
@@ -1650,6 +1650,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		areas[group] = ptr;
 
 		base = min(ptr, base);
+	}
+
+	/*
+	 * Copy data and free unused parts. This should happen after all
+	 * allocations are complete; otherwise, we may end up with
+	 * overlapping groups.
+	 */
+	for (group = 0; group < ai->nr_groups; group++) {
+		struct pcpu_group_info *gi = &ai->groups[group];
+		void *ptr = areas[group];
 
 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
 			if (gi->cpu_map[i] == NR_CPUS) {
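Note: previously the copy-and-trim ran inside the same loop that allocates each group, so freeing group N's unused tail handed that memory back to the early allocator, which could then serve group N+1's allocation from the hole and leave two groups overlapping. Splitting the work into two passes, allocate everything first and only then free the unused parts, closes the window. Schematically, with hypothetical helpers:

	for (group = 0; group < nr_groups; group++)	/* pass 1: allocate all */
		areas[group] = alloc_fn(group_sizes[group]);

	for (group = 0; group < nr_groups; group++)	/* pass 2: trim tails   */
		free_unused_units(areas[group]);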
@@ -1885,6 +1895,8 @@ void __init setup_per_cpu_areas(void)
 	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 	if (!ai || !fc)
 		panic("Failed to allocate memory for percpu areas.");
+	/* kmemleak tracks the percpu allocations separately */
+	kmemleak_free(fc);
 
 	ai->dyn_size = unit_size;
 	ai->unit_size = unit_size;
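Note: kmemleak_free() does not release fc; it only deletes the bootmem block from kmemleak's object database while the buffer lives on as the dummy first percpu chunk. Since kmemleak gained dedicated percpu hooks around this time (kmemleak_alloc_percpu()/kmemleak_free_percpu()), the same memory would otherwise be accounted twice and produce false reports.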